SelectionDAGBuilder.cpp revision 94ff09ba5fca224ef3467e96eb30b91ddc100f7e
1//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements routines for translating from LLVM IR into SelectionDAG IR.
11//
12//===----------------------------------------------------------------------===//
13
14#define DEBUG_TYPE "isel"
15#include "SelectionDAGBuilder.h"
16#include "FunctionLoweringInfo.h"
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/SmallSet.h"
19#include "llvm/Analysis/AliasAnalysis.h"
20#include "llvm/Constants.h"
21#include "llvm/CallingConv.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Function.h"
24#include "llvm/GlobalVariable.h"
25#include "llvm/InlineAsm.h"
26#include "llvm/Instructions.h"
27#include "llvm/Intrinsics.h"
28#include "llvm/IntrinsicInst.h"
29#include "llvm/LLVMContext.h"
30#include "llvm/Module.h"
31#include "llvm/CodeGen/FastISel.h"
32#include "llvm/CodeGen/GCStrategy.h"
33#include "llvm/CodeGen/GCMetadata.h"
34#include "llvm/CodeGen/MachineFunction.h"
35#include "llvm/CodeGen/MachineFrameInfo.h"
36#include "llvm/CodeGen/MachineInstrBuilder.h"
37#include "llvm/CodeGen/MachineJumpTableInfo.h"
38#include "llvm/CodeGen/MachineModuleInfo.h"
39#include "llvm/CodeGen/MachineRegisterInfo.h"
40#include "llvm/CodeGen/PseudoSourceValue.h"
41#include "llvm/CodeGen/SelectionDAG.h"
42#include "llvm/CodeGen/DwarfWriter.h"
43#include "llvm/Analysis/DebugInfo.h"
44#include "llvm/Target/TargetRegisterInfo.h"
45#include "llvm/Target/TargetData.h"
46#include "llvm/Target/TargetFrameInfo.h"
47#include "llvm/Target/TargetInstrInfo.h"
48#include "llvm/Target/TargetIntrinsicInfo.h"
49#include "llvm/Target/TargetLowering.h"
50#include "llvm/Target/TargetOptions.h"
51#include "llvm/Support/Compiler.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/MathExtras.h"
56#include "llvm/Support/raw_ostream.h"
57#include <algorithm>
58using namespace llvm;
59
60/// LimitFloatPrecision - Generate low-precision inline sequences for
61/// some float libcalls (6, 8 or 12 bits).
62static unsigned LimitFloatPrecision;
63
64static cl::opt<unsigned, true>
65LimitFPPrecision("limit-float-precision",
66                 cl::desc("Generate low-precision inline sequences "
67                          "for some float libcalls"),
68                 cl::location(LimitFloatPrecision),
69                 cl::init(0));
70
71namespace {
72  /// RegsForValue - This struct represents the registers (physical or virtual)
73  /// to which a particular set of values is assigned, and the type information
74  /// about those values. The most common situation is to represent one value at
75  /// a time, but struct or array values are handled element-wise as multiple values.
76  /// The splitting of aggregates is performed recursively, so that we never
77  /// have aggregate-typed registers. The values at this point do not necessarily
78  /// have legal types, so each value may require one or more registers of some
79  /// legal type.
80  ///
81  struct RegsForValue {
82    /// TLI - The TargetLowering object.
83    ///
84    const TargetLowering *TLI;
85
86    /// ValueVTs - The value types of the values, which may not be legal, and
87    /// may need to be promoted or synthesized from one or more registers.
88    ///
89    SmallVector<EVT, 4> ValueVTs;
90
91    /// RegVTs - The value types of the registers. This is the same size as
92    /// ValueVTs and records, for each value, the type of the assigned register
93    /// or registers. (Individual values are never synthesized from more than
94    /// one type of register.)
95    ///
96    /// With virtual registers, the contents of RegVTs are redundant with TLI's
97    /// getRegisterType member function; with physical registers, however, a
98    /// separate record of the types is necessary.
99    ///
100    SmallVector<EVT, 4> RegVTs;
101
102    /// Regs - This list holds the registers assigned to the values.
103    /// Each legal or promoted value requires one register, and each
104    /// expanded value requires multiple registers.
105    ///
106    SmallVector<unsigned, 4> Regs;
107
108    RegsForValue() : TLI(0) {}
109
110    RegsForValue(const TargetLowering &tli,
111                 const SmallVector<unsigned, 4> &regs,
112                 EVT regvt, EVT valuevt)
113      : TLI(&tli),  ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
114    RegsForValue(const TargetLowering &tli,
115                 const SmallVector<unsigned, 4> &regs,
116                 const SmallVector<EVT, 4> &regvts,
117                 const SmallVector<EVT, 4> &valuevts)
118      : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
119    RegsForValue(LLVMContext &Context, const TargetLowering &tli,
120                 unsigned Reg, const Type *Ty) : TLI(&tli) {
121      ComputeValueVTs(tli, Ty, ValueVTs);
122
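      // Assign consecutive register numbers starting at Reg: each value gets
      // getNumRegisters(ValueVT) registers of its single register type, and
      // RegVTs records that register type once per value.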
123      for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
124        EVT ValueVT = ValueVTs[Value];
125        unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
126        EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
127        for (unsigned i = 0; i != NumRegs; ++i)
128          Regs.push_back(Reg + i);
129        RegVTs.push_back(RegisterVT);
130        Reg += NumRegs;
131      }
132    }
133
134    /// append - Add the specified values to this one.
135    void append(const RegsForValue &RHS) {
136      TLI = RHS.TLI;
137      ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
138      RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
139      Regs.append(RHS.Regs.begin(), RHS.Regs.end());
140    }
141
142
143    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from
144    /// this value and return the result as a ValueVTs value.  This uses
145    /// Chain/Flag as the input and updates them for the output Chain/Flag.
146    /// If the Flag pointer is NULL, no flag is used.
147    SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, unsigned Order,
148                            SDValue &Chain, SDValue *Flag) const;
149
150    /// getCopyToRegs - Emit a series of CopyToReg nodes that copy the
151    /// specified value into the registers specified by this object.  This uses
152    /// Chain/Flag as the input and updates them for the output Chain/Flag.
153    /// If the Flag pointer is NULL, no flag is used.
154    void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
155                       unsigned Order, SDValue &Chain, SDValue *Flag) const;
156
157    /// AddInlineAsmOperands - Add this value to the specified inlineasm node
158    /// operand list.  This adds the code marker, matching input operand index
159    /// (if applicable), and includes the number of values added into it.
160    void AddInlineAsmOperands(unsigned Code,
161                              bool HasMatching, unsigned MatchingIdx,
162                              SelectionDAG &DAG, unsigned Order,
163                              std::vector<SDValue> &Ops) const;
164  };
165}
166
167/// getCopyFromParts - Create a value that contains the specified legal parts
168/// combined into the value they represent.  If the parts combine to a type
169/// larger than ValueVT, then AssertOp can be used to specify whether the extra
170/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
171/// (ISD::AssertSext).
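///
/// For example, on a target whose widest legal integer type is i32, an i64
/// value arrives here as two i32 parts; they are reassembled with a
/// BUILD_PAIR (after swapping the halves on big-endian targets).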
172static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, unsigned Order,
173                                const SDValue *Parts,
174                                unsigned NumParts, EVT PartVT, EVT ValueVT,
175                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
176  assert(NumParts > 0 && "No parts to assemble!");
177  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
178  SDValue Val = Parts[0];
179  if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
180
181  if (NumParts > 1) {
182    // Assemble the value from multiple parts.
183    if (!ValueVT.isVector() && ValueVT.isInteger()) {
184      unsigned PartBits = PartVT.getSizeInBits();
185      unsigned ValueBits = ValueVT.getSizeInBits();
186
187      // Assemble the power of 2 part.
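      // RoundParts is NumParts rounded down to a power of two: the test
      // NumParts & (NumParts - 1) is nonzero exactly when NumParts is not a
      // power of two, in which case we use 1 << Log2_32(NumParts) instead.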
188      unsigned RoundParts = NumParts & (NumParts - 1) ?
189        1 << Log2_32(NumParts) : NumParts;
190      unsigned RoundBits = PartBits * RoundParts;
191      EVT RoundVT = RoundBits == ValueBits ?
192        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
193      SDValue Lo, Hi;
194
195      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
196
197      if (RoundParts > 2) {
198        Lo = getCopyFromParts(DAG, dl, Order, Parts, RoundParts / 2,
199                              PartVT, HalfVT);
200        Hi = getCopyFromParts(DAG, dl, Order, Parts + RoundParts / 2,
201                              RoundParts / 2, PartVT, HalfVT);
202      } else {
203        Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
204        Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
205      }
206
207      if (TLI.isBigEndian())
208        std::swap(Lo, Hi);
209
210      Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
211
212      if (DisableScheduling) {
213        DAG.AssignOrdering(Lo.getNode(), Order);
214        DAG.AssignOrdering(Hi.getNode(), Order);
215        DAG.AssignOrdering(Val.getNode(), Order);
216      }
217
218      if (RoundParts < NumParts) {
219        // Assemble the trailing non-power-of-2 part.
220        unsigned OddParts = NumParts - RoundParts;
221        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
222        Hi = getCopyFromParts(DAG, dl, Order,
223                              Parts + RoundParts, OddParts, PartVT, OddVT);
224
225        // Combine the round and odd parts.
226        Lo = Val;
227        if (TLI.isBigEndian())
228          std::swap(Lo, Hi);
229        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
230        Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
231        if (DisableScheduling) DAG.AssignOrdering(Hi.getNode(), Order);
232        Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
233                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
234                                         TLI.getPointerTy()));
235        if (DisableScheduling) DAG.AssignOrdering(Hi.getNode(), Order);
236        Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
237        if (DisableScheduling) DAG.AssignOrdering(Lo.getNode(), Order);
238        Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
239        if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
240      }
241    } else if (ValueVT.isVector()) {
242      // Handle a multi-element vector.
243      EVT IntermediateVT, RegisterVT;
244      unsigned NumIntermediates;
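      // getVectorTypeBreakdown describes how the (possibly illegal) vector
      // type is legalized: it is split into NumIntermediates pieces of type
      // IntermediateVT, which are in turn carried in NumRegs registers of
      // type RegisterVT.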
245      unsigned NumRegs =
246        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
247                                   NumIntermediates, RegisterVT);
248      assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
249      NumParts = NumRegs; // Silence a compiler warning.
250      assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
251      assert(RegisterVT == Parts[0].getValueType() &&
252             "Part type doesn't match part!");
253
254      // Assemble the parts into intermediate operands.
255      SmallVector<SDValue, 8> Ops(NumIntermediates);
256      if (NumIntermediates == NumParts) {
257        // If the register was not expanded, truncate or copy the value,
258        // as appropriate.
259        for (unsigned i = 0; i != NumParts; ++i)
260          Ops[i] = getCopyFromParts(DAG, dl, Order, &Parts[i], 1,
261                                    PartVT, IntermediateVT);
262      } else if (NumParts > 0) {
263        // If the intermediate type was expanded, build the intermediate operands
264        // from the parts.
265        assert(NumParts % NumIntermediates == 0 &&
266               "Must expand into a divisible number of parts!");
267        unsigned Factor = NumParts / NumIntermediates;
268        for (unsigned i = 0; i != NumIntermediates; ++i)
269          Ops[i] = getCopyFromParts(DAG, dl, Order, &Parts[i * Factor], Factor,
270                                    PartVT, IntermediateVT);
271      }
272
273      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
274      // operands.
275      Val = DAG.getNode(IntermediateVT.isVector() ?
276                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
277                        ValueVT, &Ops[0], NumIntermediates);
278      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
279    } else if (PartVT.isFloatingPoint()) {
280      // FP split into multiple FP parts (for ppcf128)
281      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
282             "Unexpected split");
283      SDValue Lo, Hi;
284      Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
285      Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
286      if (TLI.isBigEndian())
287        std::swap(Lo, Hi);
288      Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
289
290      if (DisableScheduling) {
291        DAG.AssignOrdering(Hi.getNode(), Order);
292        DAG.AssignOrdering(Lo.getNode(), Order);
293        DAG.AssignOrdering(Val.getNode(), Order);
294      }
295    } else {
296      // FP split into integer parts (soft fp)
297      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
298             !PartVT.isVector() && "Unexpected split");
299      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
300      Val = getCopyFromParts(DAG, dl, Order, Parts, NumParts, PartVT, IntVT);
301    }
302  }
303
304  // There is now one part, held in Val.  Correct it to match ValueVT.
305  PartVT = Val.getValueType();
306
307  if (PartVT == ValueVT)
308    return Val;
309
310  if (PartVT.isVector()) {
311    assert(ValueVT.isVector() && "Unknown vector conversion!");
312    SDValue Res = DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
313    if (DisableScheduling)
314      DAG.AssignOrdering(Res.getNode(), Order);
315    return Res;
316  }
317
318  if (ValueVT.isVector()) {
319    assert(ValueVT.getVectorElementType() == PartVT &&
320           ValueVT.getVectorNumElements() == 1 &&
321           "Only trivial scalar-to-vector conversions should get here!");
322    SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
323    if (DisableScheduling)
324      DAG.AssignOrdering(Res.getNode(), Order);
325    return Res;
326  }
327
328  if (PartVT.isInteger() &&
329      ValueVT.isInteger()) {
330    if (ValueVT.bitsLT(PartVT)) {
331      // For a truncate, see if we have any information to
332      // indicate whether the truncated bits will always be
333      // zero or sign-extension.
334      if (AssertOp != ISD::DELETED_NODE)
335        Val = DAG.getNode(AssertOp, dl, PartVT, Val,
336                          DAG.getValueType(ValueVT));
337      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
338      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
339      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
340      return Val;
341    } else {
342      Val = DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
343      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
344      return Val;
345    }
346  }
347
348  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
349    if (ValueVT.bitsLT(Val.getValueType())) {
350      // FP_ROUND's are always exact here.
351      Val = DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
352                        DAG.getIntPtrConstant(1));
353      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
354      return Val;
355    }
356
357    Val = DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
358    if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
359    return Val;
360  }
361
362  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
363    Val = DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
364    if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
365    return Val;
366  }
367
368  llvm_unreachable("Unknown mismatch!");
369  return SDValue();
370}
371
372/// getCopyToParts - Create a series of nodes that contain the specified value
373/// split into legal parts.  If the parts contain more bits than Val, then, for
374/// integers, ExtendKind can be used to specify how to generate the extra bits.
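///
/// This is the inverse of getCopyFromParts: for example, an i64 value on a
/// target whose widest legal integer type is i32 is split into two i32 parts
/// with EXTRACT_ELEMENT, and the parts are reversed afterwards on big-endian
/// targets.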
375static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, unsigned Order,
376                           SDValue Val, SDValue *Parts, unsigned NumParts,
377                           EVT PartVT,
378                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
379  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
380  EVT PtrVT = TLI.getPointerTy();
381  EVT ValueVT = Val.getValueType();
382  unsigned PartBits = PartVT.getSizeInBits();
383  unsigned OrigNumParts = NumParts;
384  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
385
386  if (!NumParts)
387    return;
388
389  if (!ValueVT.isVector()) {
390    if (PartVT == ValueVT) {
391      assert(NumParts == 1 && "No-op copy with multiple parts!");
392      Parts[0] = Val;
393      return;
394    }
395
396    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
397      // If the parts cover more bits than the value has, promote the value.
398      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
399        assert(NumParts == 1 && "Do not know what to promote to!");
400        Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
401      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
402        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
403        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
404      } else {
405        llvm_unreachable("Unknown mismatch!");
406      }
407    } else if (PartBits == ValueVT.getSizeInBits()) {
408      // Different types of the same size.
409      assert(NumParts == 1 && PartVT != ValueVT);
410      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
411    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
412      // If the parts cover fewer bits than the value has, truncate the value.
413      if (PartVT.isInteger() && ValueVT.isInteger()) {
414        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
415        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
416      } else {
417        llvm_unreachable("Unknown mismatch!");
418      }
419    }
420
421    if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
422
423    // The value may have changed - recompute ValueVT.
424    ValueVT = Val.getValueType();
425    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
426           "Failed to tile the value with PartVT!");
427
428    if (NumParts == 1) {
429      assert(PartVT == ValueVT && "Type conversion failed!");
430      Parts[0] = Val;
431      return;
432    }
433
434    // Expand the value into multiple parts.
435    if (NumParts & (NumParts - 1)) {
436      // The number of parts is not a power of 2.  Split off and copy the tail.
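      // RoundParts is the largest power of two not exceeding NumParts. The
      // OddParts above it are peeled off the high end of the value with a
      // shift by RoundBits and copied out recursively; the value is then
      // truncated to RoundBits so the power-of-two path below can finish.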
437      assert(PartVT.isInteger() && ValueVT.isInteger() &&
438             "Do not know what to expand to!");
439      unsigned RoundParts = 1 << Log2_32(NumParts);
440      unsigned RoundBits = RoundParts * PartBits;
441      unsigned OddParts = NumParts - RoundParts;
442      SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
443                                   DAG.getConstant(RoundBits,
444                                                   TLI.getPointerTy()));
445      getCopyToParts(DAG, dl, Order, OddVal, Parts + RoundParts,
446                     OddParts, PartVT);
447
448      if (TLI.isBigEndian())
449        // The odd parts were reversed by getCopyToParts - unreverse them.
450        std::reverse(Parts + RoundParts, Parts + NumParts);
451
452      NumParts = RoundParts;
453      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
454      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
455
456      if (DisableScheduling) {
457        DAG.AssignOrdering(OddVal.getNode(), Order);
458        DAG.AssignOrdering(Val.getNode(), Order);
459      }
460    }
461
462    // The number of parts is a power of 2.  Repeatedly bisect the value using
463    // EXTRACT_ELEMENT.
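    // For example, four i32 parts of an i128 value are produced by first
    // splitting the i128 into two i64 halves and then splitting each half
    // into two i32 quarters.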
464    Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
465                           EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()),
466                           Val);
467
468    if (DisableScheduling)
469      DAG.AssignOrdering(Parts[0].getNode(), Order);
470
471    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
472      for (unsigned i = 0; i < NumParts; i += StepSize) {
473        unsigned ThisBits = StepSize * PartBits / 2;
474        EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
475        SDValue &Part0 = Parts[i];
476        SDValue &Part1 = Parts[i+StepSize/2];
477
478        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
479                            ThisVT, Part0,
480                            DAG.getConstant(1, PtrVT));
481        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
482                            ThisVT, Part0,
483                            DAG.getConstant(0, PtrVT));
484
485        if (DisableScheduling) {
486          DAG.AssignOrdering(Part0.getNode(), Order);
487          DAG.AssignOrdering(Part1.getNode(), Order);
488        }
489
490        if (ThisBits == PartBits && ThisVT != PartVT) {
491          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
492                                                PartVT, Part0);
493          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
494                                                PartVT, Part1);
495          if (DisableScheduling) {
496            DAG.AssignOrdering(Part0.getNode(), Order);
497            DAG.AssignOrdering(Part1.getNode(), Order);
498          }
499        }
500      }
501    }
502
503    if (TLI.isBigEndian())
504      std::reverse(Parts, Parts + OrigNumParts);
505
506    return;
507  }
508
509  // Vector ValueVT.
510  if (NumParts == 1) {
511    if (PartVT != ValueVT) {
512      if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
513        Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
514      } else {
515        assert(ValueVT.getVectorElementType() == PartVT &&
516               ValueVT.getVectorNumElements() == 1 &&
517               "Only trivial vector-to-scalar conversions should get here!");
518        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
519                          PartVT, Val,
520                          DAG.getConstant(0, PtrVT));
521      }
522    }
523
524    if (DisableScheduling)
525      DAG.AssignOrdering(Val.getNode(), Order);
526
527    Parts[0] = Val;
528    return;
529  }
530
531  // Handle a multi-element vector.
532  EVT IntermediateVT, RegisterVT;
533  unsigned NumIntermediates;
534  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
535                              IntermediateVT, NumIntermediates, RegisterVT);
536  unsigned NumElements = ValueVT.getVectorNumElements();
537
538  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
539  NumParts = NumRegs; // Silence a compiler warning.
540  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
541
542  // Split the vector into intermediate operands.
543  SmallVector<SDValue, 8> Ops(NumIntermediates);
544  for (unsigned i = 0; i != NumIntermediates; ++i) {
545    if (IntermediateVT.isVector())
546      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
547                           IntermediateVT, Val,
548                           DAG.getConstant(i * (NumElements / NumIntermediates),
549                                           PtrVT));
550    else
551      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
552                           IntermediateVT, Val,
553                           DAG.getConstant(i, PtrVT));
554
555    if (DisableScheduling)
556      DAG.AssignOrdering(Ops[i].getNode(), Order);
557  }
558
559  // Split the intermediate operands into legal parts.
560  if (NumParts == NumIntermediates) {
561    // If the register was not expanded, promote or copy the value,
562    // as appropriate.
563    for (unsigned i = 0; i != NumParts; ++i)
564      getCopyToParts(DAG, dl, Order, Ops[i], &Parts[i], 1, PartVT);
565  } else if (NumParts > 0) {
566    // If the intermediate type was expanded, split each intermediate operand
567    // into legal parts.
568    assert(NumParts % NumIntermediates == 0 &&
569           "Must expand into a divisible number of parts!");
570    unsigned Factor = NumParts / NumIntermediates;
571    for (unsigned i = 0; i != NumIntermediates; ++i)
572      getCopyToParts(DAG, dl, Order, Ops[i], &Parts[i*Factor], Factor, PartVT);
573  }
574}
575
576
577void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
578  AA = &aa;
579  GFI = gfi;
580  TD = DAG.getTarget().getTargetData();
581}
582
583/// clear - Clear out the current SelectionDAG and the associated
584/// state and prepare this SelectionDAGBuilder object to be used
585/// for a new block. This doesn't clear out information about
586/// additional blocks that are needed to complete switch lowering
587/// or PHI node updating; that information is cleared out as it is
588/// consumed.
589void SelectionDAGBuilder::clear() {
590  NodeMap.clear();
591  PendingLoads.clear();
592  PendingExports.clear();
593  EdgeMapping.clear();
594  DAG.clear();
595  CurDebugLoc = DebugLoc::getUnknownLoc();
596  HasTailCall = false;
597}
598
599/// getRoot - Return the current virtual root of the Selection DAG,
600/// flushing any PendingLoad items. This must be done before emitting
601/// a store or any other node that may need to be ordered after any
602/// prior load instructions.
603///
604SDValue SelectionDAGBuilder::getRoot() {
605  if (PendingLoads.empty())
606    return DAG.getRoot();
607
608  if (PendingLoads.size() == 1) {
609    SDValue Root = PendingLoads[0];
610    DAG.setRoot(Root);
611    PendingLoads.clear();
612    return Root;
613  }
614
615  // Otherwise, we have to make a token factor node.
616  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
617                               &PendingLoads[0], PendingLoads.size());
618  PendingLoads.clear();
619  DAG.setRoot(Root);
620  return Root;
621}
622
623/// getControlRoot - Similar to getRoot, but instead of flushing all the
624/// PendingLoad items, flush all the PendingExports items. It is necessary
625/// to do this before emitting a terminator instruction.
626///
627SDValue SelectionDAGBuilder::getControlRoot() {
628  SDValue Root = DAG.getRoot();
629
630  if (PendingExports.empty())
631    return Root;
632
633  // Turn all of the CopyToReg chains into one factored node.
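  // The root is not the entry token, so it carries real dependencies; append
  // it to the token factor unless one of the pending exports already chains
  // directly off of it, in which case the dependence is already captured.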
634  if (Root.getOpcode() != ISD::EntryToken) {
635    unsigned i = 0, e = PendingExports.size();
636    for (; i != e; ++i) {
637      assert(PendingExports[i].getNode()->getNumOperands() > 1);
638      if (PendingExports[i].getNode()->getOperand(0) == Root)
639        break;  // Don't add the root if we already indirectly depend on it.
640    }
641
642    if (i == e)
643      PendingExports.push_back(Root);
644  }
645
646  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
647                     &PendingExports[0],
648                     PendingExports.size());
649  PendingExports.clear();
650  DAG.setRoot(Root);
651  return Root;
652}
653
654void SelectionDAGBuilder::visit(Instruction &I) {
655  visit(I.getOpcode(), I);
656}
657
658void SelectionDAGBuilder::visit(unsigned Opcode, User &I) {
659  // We're processing a new instruction.
660  ++SDNodeOrder;
661
662  // Note: this doesn't use InstVisitor, because it has to work with
663  // ConstantExprs in addition to instructions.
664  switch (Opcode) {
665  default: llvm_unreachable("Unknown instruction type encountered!");
666    // Build the switch statement using the Instruction.def file.
667#define HANDLE_INST(NUM, OPCODE, CLASS) \
668  case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
669#include "llvm/Instruction.def"
670  }
671}
672
673SDValue SelectionDAGBuilder::getValue(const Value *V) {
674  SDValue &N = NodeMap[V];
675  if (N.getNode()) return N;
676
677  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
678    EVT VT = TLI.getValueType(V->getType(), true);
679
680    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
681      return N = DAG.getConstant(*CI, VT);
682
683    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
684      return N = DAG.getGlobalAddress(GV, VT);
685
686    if (isa<ConstantPointerNull>(C))
687      return N = DAG.getConstant(0, TLI.getPointerTy());
688
689    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
690      return N = DAG.getConstantFP(*CFP, VT);
691
692    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
693      return N = DAG.getUNDEF(VT);
694
695    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
696      visit(CE->getOpcode(), *CE);
697      SDValue N1 = NodeMap[V];
698      assert(N1.getNode() && "visit didn't populate the ValueMap!");
699      return N1;
700    }
701
702    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
703      SmallVector<SDValue, 4> Constants;
704      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
705           OI != OE; ++OI) {
706        SDNode *Val = getValue(*OI).getNode();
707        // If the operand is an empty aggregate, there are no values.
708        if (!Val) continue;
709        // Add each leaf value from the operand to the Constants list
710        // to form a flattened list of all the values.
711        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
712          Constants.push_back(SDValue(Val, i));
713      }
714
715      SDValue Res = DAG.getMergeValues(&Constants[0], Constants.size(),
716                                       getCurDebugLoc());
717      if (DisableScheduling)
718        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
719      return Res;
720    }
721
722    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
723      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
724             "Unknown struct or array constant!");
725
726      SmallVector<EVT, 4> ValueVTs;
727      ComputeValueVTs(TLI, C->getType(), ValueVTs);
728      unsigned NumElts = ValueVTs.size();
729      if (NumElts == 0)
730        return SDValue(); // empty struct
731      SmallVector<SDValue, 4> Constants(NumElts);
732      for (unsigned i = 0; i != NumElts; ++i) {
733        EVT EltVT = ValueVTs[i];
734        if (isa<UndefValue>(C))
735          Constants[i] = DAG.getUNDEF(EltVT);
736        else if (EltVT.isFloatingPoint())
737          Constants[i] = DAG.getConstantFP(0, EltVT);
738        else
739          Constants[i] = DAG.getConstant(0, EltVT);
740      }
741
742      SDValue Res = DAG.getMergeValues(&Constants[0], NumElts,
743                                       getCurDebugLoc());
744      if (DisableScheduling)
745        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
746      return Res;
747    }
748
749    if (BlockAddress *BA = dyn_cast<BlockAddress>(C))
750      return DAG.getBlockAddress(BA, VT);
751
752    const VectorType *VecTy = cast<VectorType>(V->getType());
753    unsigned NumElements = VecTy->getNumElements();
754
755    // Now that we know the number and type of the elements, get that number of
756    // elements into the Ops array based on what kind of constant it is.
757    SmallVector<SDValue, 16> Ops;
758    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
759      for (unsigned i = 0; i != NumElements; ++i)
760        Ops.push_back(getValue(CP->getOperand(i)));
761    } else {
762      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
763      EVT EltVT = TLI.getValueType(VecTy->getElementType());
764
765      SDValue Op;
766      if (EltVT.isFloatingPoint())
767        Op = DAG.getConstantFP(0, EltVT);
768      else
769        Op = DAG.getConstant(0, EltVT);
770      Ops.assign(NumElements, Op);
771    }
772
773    // Create a BUILD_VECTOR node.
774    SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
775                              VT, &Ops[0], Ops.size());
776    if (DisableScheduling)
777      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
778
779    return NodeMap[V] = Res;
780  }
781
782  // If this is a static alloca, generate it as the frameindex instead of
783  // computation.
784  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
785    DenseMap<const AllocaInst*, int>::iterator SI =
786      FuncInfo.StaticAllocaMap.find(AI);
787    if (SI != FuncInfo.StaticAllocaMap.end())
788      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
789  }
790
791  unsigned InReg = FuncInfo.ValueMap[V];
792  assert(InReg && "Value not in map!");
793
794  RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
795  SDValue Chain = DAG.getEntryNode();
796  return RFV.getCopyFromRegs(DAG, getCurDebugLoc(),
797                             SDNodeOrder, Chain, NULL);
798}
799
800/// Get the EVTs and ArgFlags collections that represent the return type
801/// of the given function.  This does not require a DAG or a return value, and
802/// is suitable for use before any DAGs for the function are constructed.
803static void getReturnInfo(const Type* ReturnType,
804                   Attributes attr, SmallVectorImpl<EVT> &OutVTs,
805                   SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
806                   TargetLowering &TLI,
807                   SmallVectorImpl<uint64_t> *Offsets = 0) {
808  SmallVector<EVT, 4> ValueVTs;
809  ComputeValueVTs(TLI, ReturnType, ValueVTs, Offsets);
810  unsigned NumValues = ValueVTs.size();
811  if (NumValues == 0) return;
812
813  for (unsigned j = 0, f = NumValues; j != f; ++j) {
814    EVT VT = ValueVTs[j];
815    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
816
817    if (attr & Attribute::SExt)
818      ExtendKind = ISD::SIGN_EXTEND;
819    else if (attr & Attribute::ZExt)
820      ExtendKind = ISD::ZERO_EXTEND;
821
822    // FIXME: C calling convention requires the return type to be promoted to
823    // at least 32-bit. But this is not necessary for non-C calling
824    // conventions. The frontend should mark functions whose return values
825    // require promoting with signext or zeroext attributes.
826    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
827      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
828      if (VT.bitsLT(MinVT))
829        VT = MinVT;
830    }
831
832    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
833    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
834    // 'inreg' on function refers to return value
835    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
836    if (attr & Attribute::InReg)
837      Flags.setInReg();
838
839    // Propagate extension type if any
840    if (attr & Attribute::SExt)
841      Flags.setSExt();
842    else if (attr & Attribute::ZExt)
843      Flags.setZExt();
844
845    for (unsigned i = 0; i < NumParts; ++i) {
846      OutVTs.push_back(PartVT);
847      OutFlags.push_back(Flags);
848    }
849  }
850}
851
852void SelectionDAGBuilder::visitRet(ReturnInst &I) {
853  SDValue Chain = getControlRoot();
854  SmallVector<ISD::OutputArg, 8> Outs;
855  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
856
857  if (!FLI.CanLowerReturn) {
858    unsigned DemoteReg = FLI.DemoteRegister;
859    const Function *F = I.getParent()->getParent();
860
861    // Emit a store of the return value through the virtual register.
862    // Leave Outs empty so that LowerReturn won't try to load return
863    // registers the usual way.
864    SmallVector<EVT, 1> PtrValueVTs;
865    ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
866                    PtrValueVTs);
867
868    SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
869    SDValue RetOp = getValue(I.getOperand(0));
870
871    SmallVector<EVT, 4> ValueVTs;
872    SmallVector<uint64_t, 4> Offsets;
873    ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
874    unsigned NumValues = ValueVTs.size();
875
876    SmallVector<SDValue, 4> Chains(NumValues);
877    EVT PtrVT = PtrValueVTs[0];
878    for (unsigned i = 0; i != NumValues; ++i) {
879      SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, RetPtr,
880                                DAG.getConstant(Offsets[i], PtrVT));
881      Chains[i] =
882        DAG.getStore(Chain, getCurDebugLoc(),
883                     SDValue(RetOp.getNode(), RetOp.getResNo() + i),
884                     Add, NULL, Offsets[i], false, 0);
885
886      if (DisableScheduling) {
887        DAG.AssignOrdering(Add.getNode(), SDNodeOrder);
888        DAG.AssignOrdering(Chains[i].getNode(), SDNodeOrder);
889      }
890    }
891
892    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
893                        MVT::Other, &Chains[0], NumValues);
894
895    if (DisableScheduling)
896      DAG.AssignOrdering(Chain.getNode(), SDNodeOrder);
897  } else {
898    for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
899      SmallVector<EVT, 4> ValueVTs;
900      ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
901      unsigned NumValues = ValueVTs.size();
902      if (NumValues == 0) continue;
903
904      SDValue RetOp = getValue(I.getOperand(i));
905      for (unsigned j = 0, f = NumValues; j != f; ++j) {
906        EVT VT = ValueVTs[j];
907
908        ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
909
910        const Function *F = I.getParent()->getParent();
911        if (F->paramHasAttr(0, Attribute::SExt))
912          ExtendKind = ISD::SIGN_EXTEND;
913        else if (F->paramHasAttr(0, Attribute::ZExt))
914          ExtendKind = ISD::ZERO_EXTEND;
915
916        // FIXME: C calling convention requires the return type to be promoted to
917        // at least 32-bit. But this is not necessary for non-C calling
918        // conventions. The frontend should mark functions whose return values
919        // require promoting with signext or zeroext attributes.
920        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
921          EVT MinVT = TLI.getRegisterType(*DAG.getContext(), MVT::i32);
922          if (VT.bitsLT(MinVT))
923            VT = MinVT;
924        }
925
926        unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
927        EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
928        SmallVector<SDValue, 4> Parts(NumParts);
929        getCopyToParts(DAG, getCurDebugLoc(), SDNodeOrder,
930                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
931                       &Parts[0], NumParts, PartVT, ExtendKind);
932
933        // 'inreg' on function refers to return value
934        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
935        if (F->paramHasAttr(0, Attribute::InReg))
936          Flags.setInReg();
937
938        // Propagate extension type if any
939        if (F->paramHasAttr(0, Attribute::SExt))
940          Flags.setSExt();
941        else if (F->paramHasAttr(0, Attribute::ZExt))
942          Flags.setZExt();
943
944        for (unsigned i = 0; i < NumParts; ++i)
945          Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
946      }
947    }
948  }
949
950  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
951  CallingConv::ID CallConv =
952    DAG.getMachineFunction().getFunction()->getCallingConv();
953  Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
954                          Outs, getCurDebugLoc(), DAG);
955
956  // Verify that the target's LowerReturn behaved as expected.
957  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
958         "LowerReturn didn't return a valid chain!");
959
960  // Update the DAG with the new chain value resulting from return lowering.
961  DAG.setRoot(Chain);
962
963  if (DisableScheduling)
964    DAG.AssignOrdering(Chain.getNode(), SDNodeOrder);
965}
966
967/// CopyToExportRegsIfNeeded - If the given value has virtual registers
968/// created for it, emit nodes to copy the value into the virtual
969/// registers.
970void SelectionDAGBuilder::CopyToExportRegsIfNeeded(Value *V) {
971  if (!V->use_empty()) {
972    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
973    if (VMI != FuncInfo.ValueMap.end())
974      CopyValueToVirtualRegister(V, VMI->second);
975  }
976}
977
978/// ExportFromCurrentBlock - If this condition isn't known to be exported from
979/// the current basic block, add it to ValueMap now so that we'll get a
980/// CopyTo/FromReg.
981void SelectionDAGBuilder::ExportFromCurrentBlock(Value *V) {
982  // No need to export constants.
983  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
984
985  // Already exported?
986  if (FuncInfo.isExportedInst(V)) return;
987
988  unsigned Reg = FuncInfo.InitializeRegForValue(V);
989  CopyValueToVirtualRegister(V, Reg);
990}
991
992bool SelectionDAGBuilder::isExportableFromCurrentBlock(Value *V,
993                                                     const BasicBlock *FromBB) {
994  // The operands of the setcc have to be in this block.  We don't know
995  // how to export them from some other block.
996  if (Instruction *VI = dyn_cast<Instruction>(V)) {
997    // Can export from current BB.
998    if (VI->getParent() == FromBB)
999      return true;
1000
1001    // Is already exported, noop.
1002    return FuncInfo.isExportedInst(V);
1003  }
1004
1005  // If this is an argument, we can export it if the BB is the entry block or
1006  // if it is already exported.
1007  if (isa<Argument>(V)) {
1008    if (FromBB == &FromBB->getParent()->getEntryBlock())
1009      return true;
1010
1011    // Otherwise, can only export this if it is already exported.
1012    return FuncInfo.isExportedInst(V);
1013  }
1014
1015  // Otherwise, constants can always be exported.
1016  return true;
1017}
1018
1019static bool InBlock(const Value *V, const BasicBlock *BB) {
1020  if (const Instruction *I = dyn_cast<Instruction>(V))
1021    return I->getParent() == BB;
1022  return true;
1023}
1024
1025/// getFCmpCondCode - Return the ISD condition code corresponding to
1026/// the given LLVM IR floating-point condition code.  This includes
1027/// consideration of global floating-point math flags.
1028///
1029static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
1030  ISD::CondCode FPC, FOC;
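  // FPC preserves the full ordered/unordered semantics of the IR predicate;
  // FOC is the variant to use when NaNs can be ignored, i.e. when
  // FiniteOnlyFPMath() holds (see the return at the bottom).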
1031  switch (Pred) {
1032  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
1033  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
1034  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
1035  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
1036  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
1037  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
1038  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
1039  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
1040  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
1041  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
1042  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
1043  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
1044  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
1045  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
1046  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
1047  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
1048  default:
1049    llvm_unreachable("Invalid FCmp predicate opcode!");
1050    FOC = FPC = ISD::SETFALSE;
1051    break;
1052  }
1053  if (FiniteOnlyFPMath())
1054    return FOC;
1055  else
1056    return FPC;
1057}
1058
1059/// getICmpCondCode - Return the ISD condition code corresponding to
1060/// the given LLVM IR integer condition code.
1061///
1062static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
1063  switch (Pred) {
1064  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
1065  case ICmpInst::ICMP_NE:  return ISD::SETNE;
1066  case ICmpInst::ICMP_SLE: return ISD::SETLE;
1067  case ICmpInst::ICMP_ULE: return ISD::SETULE;
1068  case ICmpInst::ICMP_SGE: return ISD::SETGE;
1069  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
1070  case ICmpInst::ICMP_SLT: return ISD::SETLT;
1071  case ICmpInst::ICMP_ULT: return ISD::SETULT;
1072  case ICmpInst::ICMP_SGT: return ISD::SETGT;
1073  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
1074  default:
1075    llvm_unreachable("Invalid ICmp predicate opcode!");
1076    return ISD::SETNE;
1077  }
1078}
1079
1080/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1081/// This function emits a branch and is used at the leaves of an OR or an
1082/// AND operator tree.
1083///
1084void
1085SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
1086                                                  MachineBasicBlock *TBB,
1087                                                  MachineBasicBlock *FBB,
1088                                                  MachineBasicBlock *CurBB) {
1089  const BasicBlock *BB = CurBB->getBasicBlock();
1090
1091  // If the leaf of the tree is a comparison, merge the condition into
1092  // the caseblock.
1093  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1094    // The operands of the cmp have to be in this block.  We don't know
1095    // how to export them from some other block.  If this is the first block
1096    // of the sequence, no exporting is needed.
1097    if (CurBB == CurMBB ||
1098        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1099         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1100      ISD::CondCode Condition;
1101      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1102        Condition = getICmpCondCode(IC->getPredicate());
1103      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1104        Condition = getFCmpCondCode(FC->getPredicate());
1105      } else {
1106        Condition = ISD::SETEQ; // silence warning.
1107        llvm_unreachable("Unknown compare instruction");
1108      }
1109
1110      CaseBlock CB(Condition, BOp->getOperand(0),
1111                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
1112      SwitchCases.push_back(CB);
1113      return;
1114    }
1115  }
1116
1117  // Create a CaseBlock record representing this branch.
1118  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
1119               NULL, TBB, FBB, CurBB);
1120  SwitchCases.push_back(CB);
1121}
1122
1123/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// recurse through its operands, creating intermediate blocks as needed, and
/// emit a branch at each leaf so the condition is lowered as a series of
/// conditional branches.
1124void SelectionDAGBuilder::FindMergedConditions(Value *Cond,
1125                                               MachineBasicBlock *TBB,
1126                                               MachineBasicBlock *FBB,
1127                                               MachineBasicBlock *CurBB,
1128                                               unsigned Opc) {
1129  // If this node is not part of the or/and tree, emit it as a branch.
1130  Instruction *BOp = dyn_cast<Instruction>(Cond);
1131  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1132      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1133      BOp->getParent() != CurBB->getBasicBlock() ||
1134      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1135      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1136    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
1137    return;
1138  }
1139
1140  //  Create TmpBB after CurBB.
1141  MachineFunction::iterator BBI = CurBB;
1142  MachineFunction &MF = DAG.getMachineFunction();
1143  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1144  CurBB->getParent()->insert(++BBI, TmpBB);
1145
1146  if (Opc == Instruction::Or) {
1147    // Codegen X | Y as:
1148    //   jmp_if_X TBB
1149    //   jmp TmpBB
1150    // TmpBB:
1151    //   jmp_if_Y TBB
1152    //   jmp FBB
1153    //
1154
1155    // Emit the LHS condition.
1156    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
1157
1158    // Emit the RHS condition into TmpBB.
1159    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1160  } else {
1161    assert(Opc == Instruction::And && "Unknown merge op!");
1162    // Codegen X & Y as:
1163    //   jmp_if_X TmpBB
1164    //   jmp FBB
1165    // TmpBB:
1166    //   jmp_if_Y TBB
1167    //   jmp FBB
1168    //
1169    //  This requires creation of TmpBB after CurBB.
1170
1171    // Emit the LHS condition.
1172    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
1173
1174    // Emit the RHS condition into TmpBB.
1175    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1176  }
1177}
1178
1179/// If the set of cases should be emitted as a series of branches, return true.
1180/// If we should emit this as a bunch of and/or'd together conditions, return
1181/// false.
1182bool
1183SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
1184  if (Cases.size() != 2) return true;
1185
1186  // If this is two comparisons of the same values or'd or and'd together, they
1187  // will get folded into a single comparison, so don't emit two blocks.
1188  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1189       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1190      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1191       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1192    return false;
1193  }
1194
1195  return true;
1196}
1197
1198void SelectionDAGBuilder::visitBr(BranchInst &I) {
1199  // Update machine-CFG edges.
1200  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1201
1202  // Figure out which block is immediately after the current one.
1203  MachineBasicBlock *NextBlock = 0;
1204  MachineFunction::iterator BBI = CurMBB;
1205  if (++BBI != FuncInfo.MF->end())
1206    NextBlock = BBI;
1207
1208  if (I.isUnconditional()) {
1209    // Update machine-CFG edges.
1210    CurMBB->addSuccessor(Succ0MBB);
1211
1212    // If this is not a fall-through branch, emit the branch.
1213    if (Succ0MBB != NextBlock) {
1214      SDValue V = DAG.getNode(ISD::BR, getCurDebugLoc(),
1215                              MVT::Other, getControlRoot(),
1216                              DAG.getBasicBlock(Succ0MBB));
1217      DAG.setRoot(V);
1218
1219      if (DisableScheduling)
1220        DAG.AssignOrdering(V.getNode(), SDNodeOrder);
1221    }
1222
1223    return;
1224  }
1225
1226  // If this condition is one of the special cases we handle, do special stuff
1227  // now.
1228  Value *CondVal = I.getCondition();
1229  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1230
1231  // If this is a series of conditions that are or'd or and'd together, emit
1232  // this as a sequence of branches instead of setcc's with and/or operations.
1233  // For example, instead of something like:
1234  //     cmp A, B
1235  //     C = seteq
1236  //     cmp D, E
1237  //     F = setle
1238  //     or C, F
1239  //     jnz foo
1240  // Emit:
1241  //     cmp A, B
1242  //     je foo
1243  //     cmp D, E
1244  //     jle foo
1245  //
1246  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1247    if (BOp->hasOneUse() &&
1248        (BOp->getOpcode() == Instruction::And ||
1249         BOp->getOpcode() == Instruction::Or)) {
1250      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1251      // If the compares in later blocks need to use values not currently
1252      // exported from this block, export them now.  This block should always
1253      // be the first entry.
1254      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1255
1256      // Allow some cases to be rejected.
1257      if (ShouldEmitAsBranches(SwitchCases)) {
1258        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1259          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1260          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1261        }
1262
1263        // Emit the branch for this block.
1264        visitSwitchCase(SwitchCases[0]);
1265        SwitchCases.erase(SwitchCases.begin());
1266        return;
1267      }
1268
1269      // Okay, we decided not to do this, remove any inserted MBB's and clear
1270      // SwitchCases.
1271      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1272        FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1273
1274      SwitchCases.clear();
1275    }
1276  }
1277
1278  // Create a CaseBlock record representing this branch.
1279  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1280               NULL, Succ0MBB, Succ1MBB, CurMBB);
1281
1282  // Use visitSwitchCase to actually insert the fast branch sequence for this
1283  // cond branch.
1284  visitSwitchCase(CB);
1285}
1286
1287/// visitSwitchCase - Emits the necessary code to represent a single node in
1288/// the binary search tree resulting from lowering a switch instruction.
1289void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) {
1290  SDValue Cond;
1291  SDValue CondLHS = getValue(CB.CmpLHS);
1292  DebugLoc dl = getCurDebugLoc();
1293
1294  // Build the setcc now.
1295  if (CB.CmpMHS == NULL) {
1296    // Fold "(X == true)" to X and "(X == false)" to !X to
1297    // handle common cases produced by branch lowering.
1298    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1299        CB.CC == ISD::SETEQ)
1300      Cond = CondLHS;
1301    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1302             CB.CC == ISD::SETEQ) {
1303      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1304      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1305    } else
1306      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1307  } else {
1308    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1309
1310    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1311    const APInt& High  = cast<ConstantInt>(CB.CmpRHS)->getValue();
1312
1313    SDValue CmpOp = getValue(CB.CmpMHS);
1314    EVT VT = CmpOp.getValueType();
1315
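    // Lower the range test Low <= X <= High. If Low is the smallest value of
    // the type, the lower bound is implied and a single X <= High comparison
    // suffices; otherwise check (X - Low) <= (High - Low) with an unsigned
    // comparison.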
1316    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1317      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1318                          ISD::SETLE);
1319    } else {
1320      SDValue SUB = DAG.getNode(ISD::SUB, dl,
1321                                VT, CmpOp, DAG.getConstant(Low, VT));
1322      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1323                          DAG.getConstant(High-Low, VT), ISD::SETULE);
1324    }
1325  }
1326
1327  if (DisableScheduling)
1328    DAG.AssignOrdering(Cond.getNode(), SDNodeOrder);
1329
1330  // Update successor info
1331  CurMBB->addSuccessor(CB.TrueBB);
1332  CurMBB->addSuccessor(CB.FalseBB);
1333
1334  // Set NextBlock to be the MBB immediately after the current one, if any.
1335  // This is used to avoid emitting unnecessary branches to the next block.
1336  MachineBasicBlock *NextBlock = 0;
1337  MachineFunction::iterator BBI = CurMBB;
1338  if (++BBI != FuncInfo.MF->end())
1339    NextBlock = BBI;
1340
1341  // If the lhs block is the next block, invert the condition so that we can
1342  // fall through to the lhs instead of the rhs block.
1343  if (CB.TrueBB == NextBlock) {
1344    std::swap(CB.TrueBB, CB.FalseBB);
1345    SDValue True = DAG.getConstant(1, Cond.getValueType());
1346    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1347
1348    if (DisableScheduling)
1349      DAG.AssignOrdering(Cond.getNode(), SDNodeOrder);
1350  }
1351
1352  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1353                               MVT::Other, getControlRoot(), Cond,
1354                               DAG.getBasicBlock(CB.TrueBB));
1355
1356  if (DisableScheduling)
1357    DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1358
1359  // If the branch was constant folded, fix up the CFG.
1360  if (BrCond.getOpcode() == ISD::BR) {
1361    CurMBB->removeSuccessor(CB.FalseBB);
1362  } else {
1363    // Otherwise, go ahead and insert the false branch.
1364    if (BrCond == getControlRoot())
1365      CurMBB->removeSuccessor(CB.TrueBB);
1366
1367    if (CB.FalseBB != NextBlock) {
1368      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1369                           DAG.getBasicBlock(CB.FalseBB));
1370
1371      if (DisableScheduling)
1372        DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1373    }
1374  }
1375
1376  DAG.setRoot(BrCond);
1377}
1378
1379/// visitJumpTable - Emit JumpTable node in the current MBB
1380void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1381  // Emit the code for the jump table
1382  assert(JT.Reg != -1U && "Should lower JT Header first!");
1383  EVT PTy = TLI.getPointerTy();
1384  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
1385                                     JT.Reg, PTy);
1386  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1387  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
1388                                    MVT::Other, Index.getValue(1),
1389                                    Table, Index);
1390  DAG.setRoot(BrJumpTable);
1391
1392  if (DisableScheduling) {
1393    DAG.AssignOrdering(Index.getNode(), SDNodeOrder);
1394    DAG.AssignOrdering(Table.getNode(), SDNodeOrder);
1395    DAG.AssignOrdering(BrJumpTable.getNode(), SDNodeOrder);
1396  }
1397}
1398
1399/// visitJumpTableHeader - This function emits the necessary code to produce an
1400/// index into the JumpTable from the switch case value.
1401void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1402                                               JumpTableHeader &JTH) {
1403  // Subtract the lowest switch case value from the value being switched on and
1404  // conditionally branch to the default mbb if the result is greater than the
1405  // difference between the smallest and largest cases.
1406  SDValue SwitchOp = getValue(JTH.SValue);
1407  EVT VT = SwitchOp.getValueType();
1408  SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1409                            DAG.getConstant(JTH.First, VT));
1410
1411  // The SDNode we just created, which holds the value being switched on minus
1412  // the smallest case value, needs to be copied to a virtual register so it
1413  // can be used as an index into the jump table in a subsequent basic block.
1414  // This value may be smaller or larger than the target's pointer type, and
1415  // therefore require extension or truncation.
1416  SwitchOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), TLI.getPointerTy());
1417
1418  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1419  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1420                                    JumpTableReg, SwitchOp);
1421  JT.Reg = JumpTableReg;
1422
1423  // Emit the range check for the jump table, and branch to the default block
1424  // for the switch statement if the value being switched on exceeds the largest
1425  // case in the switch.
1426  SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
1427                             TLI.getSetCCResultType(Sub.getValueType()), Sub,
1428                             DAG.getConstant(JTH.Last-JTH.First,VT),
1429                             ISD::SETUGT);
1430
1431  if (DisableScheduling) {
1432    DAG.AssignOrdering(Sub.getNode(), SDNodeOrder);
1433    DAG.AssignOrdering(SwitchOp.getNode(), SDNodeOrder);
1434    DAG.AssignOrdering(CopyTo.getNode(), SDNodeOrder);
1435    DAG.AssignOrdering(CMP.getNode(), SDNodeOrder);
1436  }
1437
1438  // Set NextBlock to be the MBB immediately after the current one, if any.
1439  // This is used to avoid emitting unnecessary branches to the next block.
1440  MachineBasicBlock *NextBlock = 0;
1441  MachineFunction::iterator BBI = CurMBB;
1442
1443  if (++BBI != FuncInfo.MF->end())
1444    NextBlock = BBI;
1445
1446  SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1447                               MVT::Other, CopyTo, CMP,
1448                               DAG.getBasicBlock(JT.Default));
1449
1450  if (DisableScheduling)
1451    DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1452
1453  if (JT.MBB != NextBlock) {
1454    BrCond = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
1455                         DAG.getBasicBlock(JT.MBB));
1456
1457    if (DisableScheduling)
1458      DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1459  }
1460
1461  DAG.setRoot(BrCond);
1462}
1463
1464/// visitBitTestHeader - This function emits the necessary code to produce a
1465/// value suitable for "bit tests".
1466void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) {
1467  // Subtract the minimum value
1468  SDValue SwitchOp = getValue(B.SValue);
1469  EVT VT = SwitchOp.getValueType();
1470  SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1471                            DAG.getConstant(B.First, VT));
1472
1473  // Check range
1474  SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
1475                                  TLI.getSetCCResultType(Sub.getValueType()),
1476                                  Sub, DAG.getConstant(B.Range, VT),
1477                                  ISD::SETUGT);
1478
1479  SDValue ShiftOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(),
1480                                       TLI.getPointerTy());
1481
1482  B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
1483  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1484                                    B.Reg, ShiftOp);
1485
1486  if (DisableScheduling) {
1487    DAG.AssignOrdering(Sub.getNode(), SDNodeOrder);
1488    DAG.AssignOrdering(RangeCmp.getNode(), SDNodeOrder);
1489    DAG.AssignOrdering(ShiftOp.getNode(), SDNodeOrder);
1490    DAG.AssignOrdering(CopyTo.getNode(), SDNodeOrder);
1491  }
1492
1493  // Set NextBlock to be the MBB immediately after the current one, if any.
1494  // This is used to avoid emitting unnecessary branches to the next block.
1495  MachineBasicBlock *NextBlock = 0;
1496  MachineFunction::iterator BBI = CurMBB;
1497  if (++BBI != FuncInfo.MF->end())
1498    NextBlock = BBI;
1499
1500  MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1501
1502  CurMBB->addSuccessor(B.Default);
1503  CurMBB->addSuccessor(MBB);
1504
1505  SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1506                                MVT::Other, CopyTo, RangeCmp,
1507                                DAG.getBasicBlock(B.Default));
1508
1509  if (DisableScheduling)
1510    DAG.AssignOrdering(BrRange.getNode(), SDNodeOrder);
1511
1512  if (MBB != NextBlock) {
1513    BrRange = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
1514                          DAG.getBasicBlock(MBB));
1515
1516    if (DisableScheduling)
1517      DAG.AssignOrdering(BrRange.getNode(), SDNodeOrder);
1518  }
1519
1520  DAG.setRoot(BrRange);
1521}
1522
1523/// visitBitTestCase - This function produces one "bit test".
1524void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
1525                                           unsigned Reg,
1526                                           BitTestCase &B) {
1527  // Make desired shift
1528  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
1529                                       TLI.getPointerTy());
1530  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
1531                                  TLI.getPointerTy(),
1532                                  DAG.getConstant(1, TLI.getPointerTy()),
1533                                  ShiftOp);
1534
1535  // Emit bit tests and jumps
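  // The test computed here is ((1 << (SwitchValue - LowBound)) & B.Mask) != 0,
  // where B.Mask has a bit set for every case value in this cluster that
  // branches to B.TargetBB.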
1536  SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1537                              TLI.getPointerTy(), SwitchVal,
1538                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
1539  SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
1540                                TLI.getSetCCResultType(AndOp.getValueType()),
1541                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),
1542                                ISD::SETNE);
1543
1544  if (DisableScheduling) {
1545    DAG.AssignOrdering(ShiftOp.getNode(), SDNodeOrder);
1546    DAG.AssignOrdering(SwitchVal.getNode(), SDNodeOrder);
1547    DAG.AssignOrdering(AndOp.getNode(), SDNodeOrder);
1548    DAG.AssignOrdering(AndCmp.getNode(), SDNodeOrder);
1549  }
1550
1551  CurMBB->addSuccessor(B.TargetBB);
1552  CurMBB->addSuccessor(NextMBB);
1553
1554  SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1555                              MVT::Other, getControlRoot(),
1556                              AndCmp, DAG.getBasicBlock(B.TargetBB));
1557
1558  if (DisableScheduling)
1559    DAG.AssignOrdering(BrAnd.getNode(), SDNodeOrder);
1560
1561  // Set NextBlock to be the MBB immediately after the current one, if any.
1562  // This is used to avoid emitting unnecessary branches to the next block.
1563  MachineBasicBlock *NextBlock = 0;
1564  MachineFunction::iterator BBI = CurMBB;
1565  if (++BBI != FuncInfo.MF->end())
1566    NextBlock = BBI;
1567
1568  if (NextMBB != NextBlock) {
1569    BrAnd = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
1570                        DAG.getBasicBlock(NextMBB));
1571
1572    if (DisableScheduling)
1573      DAG.AssignOrdering(BrAnd.getNode(), SDNodeOrder);
1574  }
1575
1576  DAG.setRoot(BrAnd);
1577}
1578
1579void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
1580  // Retrieve successors.
1581  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1582  MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1583
1584  const Value *Callee(I.getCalledValue());
1585  if (isa<InlineAsm>(Callee))
1586    visitInlineAsm(&I);
1587  else
1588    LowerCallTo(&I, getValue(Callee), false, LandingPad);
1589
1590  // If the value of the invoke is used outside of its defining block, make it
1591  // available as a virtual register.
1592  CopyToExportRegsIfNeeded(&I);
1593
1594  // Update successor info
1595  CurMBB->addSuccessor(Return);
1596  CurMBB->addSuccessor(LandingPad);
1597
1598  // Drop into normal successor.
1599  SDValue Branch = DAG.getNode(ISD::BR, getCurDebugLoc(),
1600                               MVT::Other, getControlRoot(),
1601                               DAG.getBasicBlock(Return));
1602  DAG.setRoot(Branch);
1603
1604  if (DisableScheduling)
1605    DAG.AssignOrdering(Branch.getNode(), SDNodeOrder);
1606}
1607
1608void SelectionDAGBuilder::visitUnwind(UnwindInst &I) {
1609}
1610
1611/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
1612/// small case ranges).
1613bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
1614                                                 CaseRecVector& WorkList,
1615                                                 Value* SV,
1616                                                 MachineBasicBlock* Default) {
1617  Case& BackCase  = *(CR.Range.second-1);
1618
1619  // Size is the number of Cases represented by this range.
1620  size_t Size = CR.Range.second - CR.Range.first;
1621  if (Size > 3)
1622    return false;
1623
1624  // Get the MachineFunction which holds the current MBB.  This is used when
1625  // inserting any additional MBBs necessary to represent the switch.
1626  MachineFunction *CurMF = FuncInfo.MF;
1627
1628  // Figure out which block is immediately after the current one.
1629  MachineBasicBlock *NextBlock = 0;
1630  MachineFunction::iterator BBI = CR.CaseBB;
1631
1632  if (++BBI != FuncInfo.MF->end())
1633    NextBlock = BBI;
1634
1635  // TODO: If any two of the cases have the same destination, and if one value
1636  // is the same as the other, but has one bit unset that the other has set,
1637  // use bit manipulation to do two compares at once.  For example:
1638  // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1639
1640  // Rearrange the case blocks so that the last one falls through if possible.
1641  if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1642    // The last case block won't fall through into 'NextBlock' if we emit the
1643    // branches in this order.  See if rearranging a case value would help.
1644    for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1645      if (I->BB == NextBlock) {
1646        std::swap(*I, BackCase);
1647        break;
1648      }
1649    }
1650  }
1651
1652  // Create a CaseBlock record representing a conditional branch to
1653  // the Case's target mbb if the value being switched on SV is equal
1654  // to C.
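  // Each compare that fails falls through into a newly created block holding
  // the next case's compare; the last compare falls through to the default
  // block instead.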
1655  MachineBasicBlock *CurBlock = CR.CaseBB;
1656  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1657    MachineBasicBlock *FallThrough;
1658    if (I != E-1) {
1659      FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1660      CurMF->insert(BBI, FallThrough);
1661
1662      // Put SV in a virtual register to make it available from the new blocks.
1663      ExportFromCurrentBlock(SV);
1664    } else {
1665      // If the last case doesn't match, go to the default block.
1666      FallThrough = Default;
1667    }
1668
1669    Value *RHS, *LHS, *MHS;
1670    ISD::CondCode CC;
1671    if (I->High == I->Low) {
1672      // This is just a small case range :) containing exactly 1 case
1673      CC = ISD::SETEQ;
1674      LHS = SV; RHS = I->High; MHS = NULL;
1675    } else {
1676      CC = ISD::SETLE;
1677      LHS = I->Low; MHS = SV; RHS = I->High;
1678    }
1679    CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1680
1681    // If emitting the first comparison, just call visitSwitchCase to emit the
1682    // code into the current block.  Otherwise, push the CaseBlock onto the
1683    // vector to be later processed by SDISel, and insert the node's MBB
1684    // before the next MBB.
1685    if (CurBlock == CurMBB)
1686      visitSwitchCase(CB);
1687    else
1688      SwitchCases.push_back(CB);
1689
1690    CurBlock = FallThrough;
1691  }
1692
1693  return true;
1694}
1695
1696static inline bool areJTsAllowed(const TargetLowering &TLI) {
1697  return !DisableJumpTables &&
1698          (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1699           TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
1700}
1701
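/// ComputeRange - Return the number of values in the range [First, Last],
/// computed in a bit width one larger than the inputs so the count cannot
/// overflow.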
1702static APInt ComputeRange(const APInt &First, const APInt &Last) {
1703  APInt LastExt(Last), FirstExt(First);
1704  uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1705  LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1706  return (LastExt - FirstExt + 1ULL);
1707}
1708
1709/// handleJTSwitchCase - Emit a jump table for the current switch case range.
1710bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
1711                                             CaseRecVector& WorkList,
1712                                             Value* SV,
1713                                             MachineBasicBlock* Default) {
1714  Case& FrontCase = *CR.Range.first;
1715  Case& BackCase  = *(CR.Range.second-1);
1716
1717  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1718  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1719
1720  APInt TSize(First.getBitWidth(), 0);
1721  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1722       I!=E; ++I)
1723    TSize += I->size();
1724
1725  if (!areJTsAllowed(TLI) || TSize.ult(APInt(First.getBitWidth(), 4)))
1726    return false;
1727
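  // Require the jump table to be at least 40% dense.  For example, case
  // values {1, 2, 3, 9} give TSize = 4 over a range of 9, a density of
  // roughly 0.44, so a jump table would still be emitted for them.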
1728  APInt Range = ComputeRange(First, Last);
1729  double Density = TSize.roundToDouble() / Range.roundToDouble();
1730  if (Density < 0.4)
1731    return false;
1732
1733  DEBUG(errs() << "Lowering jump table\n"
1734               << "First entry: " << First << ". Last entry: " << Last << '\n'
1735               << "Range: " << Range
1736               << ". Size: " << TSize << ". Density: " << Density << "\n\n");
1737
1738  // Get the MachineFunction which holds the current MBB.  This is used when
1739  // inserting any additional MBBs necessary to represent the switch.
1740  MachineFunction *CurMF = FuncInfo.MF;
1741
1742  // Figure out which block is immediately after the current one.
1743  MachineFunction::iterator BBI = CR.CaseBB;
1744  ++BBI;
1745
1746  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1747
1748  // Create a new basic block to hold the code for loading the address
1749  // of the jump table, and jumping to it.  Update successor information;
1750  // we will either branch to the default case for the switch, or the jump
1751  // table.
1752  MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1753  CurMF->insert(BBI, JumpTableBB);
1754  CR.CaseBB->addSuccessor(Default);
1755  CR.CaseBB->addSuccessor(JumpTableBB);
1756
1757  // Build a vector of destination BBs, corresponding to each target
1758  // of the jump table. If the value of the jump table slot corresponds to
1759  // a case statement, push the case's BB onto the vector, otherwise, push
1760  // the default BB.
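  // For example, cases {1 -> A, 3 -> B} produce the table [A, Default, B].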
1761  std::vector<MachineBasicBlock*> DestBBs;
1762  APInt TEI = First;
1763  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1764    const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1765    const APInt& High = cast<ConstantInt>(I->High)->getValue();
1766
1767    if (Low.sle(TEI) && TEI.sle(High)) {
1768      DestBBs.push_back(I->BB);
1769      if (TEI==High)
1770        ++I;
1771    } else {
1772      DestBBs.push_back(Default);
1773    }
1774  }
1775
1776  // Update successor info. Add one edge to each unique successor.
1777  BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1778  for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1779         E = DestBBs.end(); I != E; ++I) {
1780    if (!SuccsHandled[(*I)->getNumber()]) {
1781      SuccsHandled[(*I)->getNumber()] = true;
1782      JumpTableBB->addSuccessor(*I);
1783    }
1784  }
1785
1786  // Create a jump table index for this jump table, or return an existing
1787  // one.
1788  unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1789
1790  // Set the jump table information so that we can codegen it as a second
1791  // MachineBasicBlock
1792  JumpTable JT(-1U, JTI, JumpTableBB, Default);
1793  JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1794  if (CR.CaseBB == CurMBB)
1795    visitJumpTableHeader(JT, JTH);
1796
1797  JTCases.push_back(JumpTableBlock(JTH, JT));
1798
1799  return true;
1800}
1801
1802/// handleBTSplitSwitchCase - Emit a comparison and split the binary search
1803/// tree into two subtrees.
1804bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
1805                                                  CaseRecVector& WorkList,
1806                                                  Value* SV,
1807                                                  MachineBasicBlock* Default) {
1808  // Get the MachineFunction which holds the current MBB.  This is used when
1809  // inserting any additional MBBs necessary to represent the switch.
1810  MachineFunction *CurMF = FuncInfo.MF;
1811
1812  // Figure out which block is immediately after the current one.
1813  MachineFunction::iterator BBI = CR.CaseBB;
1814  ++BBI;
1815
1816  Case& FrontCase = *CR.Range.first;
1817  Case& BackCase  = *(CR.Range.second-1);
1818  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1819
1820  // Size is the number of Cases represented by this range.
1821  unsigned Size = CR.Range.second - CR.Range.first;
1822
1823  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1824  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1825  double FMetric = 0;
1826  CaseItr Pivot = CR.Range.first + Size/2;
1827
1828  // Select the optimal pivot, maximizing the sum of the LHS and RHS densities.
1829  // This will (heuristically) allow us to emit JumpTables later.
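  // For each candidate pivot the metric is
  // log2(gap between the two halves) * (LHS density + RHS density), so a pivot
  // sitting at a wide gap between two dense halves scores best.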
1830  APInt TSize(First.getBitWidth(), 0);
1831  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1832       I!=E; ++I)
1833    TSize += I->size();
1834
1835  APInt LSize = FrontCase.size();
1836  APInt RSize = TSize-LSize;
1837  DEBUG(errs() << "Selecting best pivot: \n"
1838               << "First: " << First << ", Last: " << Last <<'\n'
1839               << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1840  for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1841       J!=E; ++I, ++J) {
1842    const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
1843    const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
1844    APInt Range = ComputeRange(LEnd, RBegin);
1845    assert((Range - 2ULL).isNonNegative() &&
1846           "Invalid case distance");
1847    double LDensity = (double)LSize.roundToDouble() /
1848                           (LEnd - First + 1ULL).roundToDouble();
1849    double RDensity = (double)RSize.roundToDouble() /
1850                           (Last - RBegin + 1ULL).roundToDouble();
1851    double Metric = Range.logBase2()*(LDensity+RDensity);
1852    // Should always split in some non-trivial place
1853    DEBUG(errs() <<"=>Step\n"
1854                 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1855                 << "LDensity: " << LDensity
1856                 << ", RDensity: " << RDensity << '\n'
1857                 << "Metric: " << Metric << '\n');
1858    if (FMetric < Metric) {
1859      Pivot = J;
1860      FMetric = Metric;
1861      DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1862    }
1863
1864    LSize += J->size();
1865    RSize -= J->size();
1866  }
1867  if (areJTsAllowed(TLI)) {
1868    // If our case is dense we *really* should handle it earlier!
1869    assert((FMetric > 0) && "Should handle dense range earlier!");
1870  } else {
1871    Pivot = CR.Range.first + Size/2;
1872  }
1873
1874  CaseRange LHSR(CR.Range.first, Pivot);
1875  CaseRange RHSR(Pivot, CR.Range.second);
1876  Constant *C = Pivot->Low;
1877  MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1878
1879  // We know that we branch to the LHS if the Value being switched on is
1880  // less than the Pivot value, C.  We use this to optimize our binary
1881  // tree a bit, by recognizing that if SV is greater than or equal to the
1882  // LHS's Case Value, and that Case Value is exactly one less than the
1883  // Pivot's Value, then we can branch directly to the LHS's Target,
1884  // rather than creating a leaf node for it.
1885  if ((LHSR.second - LHSR.first) == 1 &&
1886      LHSR.first->High == CR.GE &&
1887      cast<ConstantInt>(C)->getValue() ==
1888      (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1889    TrueBB = LHSR.first->BB;
1890  } else {
1891    TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1892    CurMF->insert(BBI, TrueBB);
1893    WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1894
1895    // Put SV in a virtual register to make it available from the new blocks.
1896    ExportFromCurrentBlock(SV);
1897  }
1898
1899  // Similar to the optimization above, if the Value being switched on is
1900  // known to be less than the Constant CR.LT, and the current Case Value
1901  // is CR.LT - 1, then we can branch directly to the target block for
1902  // the current Case Value, rather than emitting a RHS leaf node for it.
1903  if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1904      cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1905      (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1906    FalseBB = RHSR.first->BB;
1907  } else {
1908    FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1909    CurMF->insert(BBI, FalseBB);
1910    WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1911
1912    // Put SV in a virtual register to make it available from the new blocks.
1913    ExportFromCurrentBlock(SV);
1914  }
1915
1916  // Create a CaseBlock record representing a conditional branch to
1917  // the LHS node if the value being switched on SV is less than C.
1918  // Otherwise, branch to RHS.
1919  CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1920
1921  if (CR.CaseBB == CurMBB)
1922    visitSwitchCase(CB);
1923  else
1924    SwitchCases.push_back(CB);
1925
1926  return true;
1927}
1928
1929/// handleBitTestsSwitchCase - If the current case range has few destinations
1930/// and spans less than the machine word bitwidth, encode the case range into a
1931/// series of masks and emit bit tests with these masks.
1932bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
1933                                                   CaseRecVector& WorkList,
1934                                                   Value* SV,
1935                                                   MachineBasicBlock* Default){
1936  EVT PTy = TLI.getPointerTy();
1937  unsigned IntPtrBits = PTy.getSizeInBits();
1938
1939  Case& FrontCase = *CR.Range.first;
1940  Case& BackCase  = *(CR.Range.second-1);
1941
1942  // Get the MachineFunction which holds the current MBB.  This is used when
1943  // inserting any additional MBBs necessary to represent the switch.
1944  MachineFunction *CurMF = FuncInfo.MF;
1945
1946  // If the target does not have a legal shift left, do not emit bit tests at all.
1947  if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1948    return false;
1949
1950  size_t numCmps = 0;
1951  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1952       I!=E; ++I) {
1953    // A single case counts as one comparison, a case range as two.
1954    numCmps += (I->Low == I->High ? 1 : 2);
1955  }
1956
1957  // Count unique destinations
1958  SmallSet<MachineBasicBlock*, 4> Dests;
1959  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1960    Dests.insert(I->BB);
1961    if (Dests.size() > 3)
1962      // Don't bother with the code below if there are too many unique destinations.
1963      return false;
1964  }
1965  DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1966               << "Total number of comparisons: " << numCmps << '\n');
1967
1968  // Compute span of values.
1969  const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1970  const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1971  APInt cmpRange = maxValue - minValue;
1972
1973  DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1974               << "Low bound: " << minValue << '\n'
1975               << "High bound: " << maxValue << '\n');
1976
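  // Only use bit tests when the span of case values fits in a machine word
  // and enough comparisons are saved: at least 3 for a single destination,
  // 5 for two destinations, or 6 for three.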
1977  if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1978      (!(Dests.size() == 1 && numCmps >= 3) &&
1979       !(Dests.size() == 2 && numCmps >= 5) &&
1980       !(Dests.size() >= 3 && numCmps >= 6)))
1981    return false;
1982
1983  DEBUG(errs() << "Emitting bit tests\n");
1984  APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1985
1986  // Optimize the case where all the case values fit in a
1987  // word without having to subtract minValue. In this case,
1988  // we can optimize away the subtraction.
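  // For example, case values {1, 3, 6} on a 64-bit target are tested against
  // their masks directly, without rebasing them at the minimum value 1.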
1989  if (minValue.isNonNegative() &&
1990      maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1991    cmpRange = maxValue;
1992  } else {
1993    lowBound = minValue;
1994  }
1995
1996  CaseBitsVector CasesBits;
1997  unsigned i, count = 0;
1998
1999  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
2000    MachineBasicBlock* Dest = I->BB;
2001    for (i = 0; i < count; ++i)
2002      if (Dest == CasesBits[i].BB)
2003        break;
2004
2005    if (i == count) {
2006      assert((count < 3) && "Too many destinations to test!");
2007      CasesBits.push_back(CaseBits(0, Dest, 0));
2008      count++;
2009    }
2010
2011    const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
2012    const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
2013
2014    uint64_t lo = (lowValue - lowBound).getZExtValue();
2015    uint64_t hi = (highValue - lowBound).getZExtValue();
2016
2017    for (uint64_t j = lo; j <= hi; j++) {
2018      CasesBits[i].Mask |=  1ULL << j;
2019      CasesBits[i].Bits++;
2020    }
2021
2022  }
2023  std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
2024
2025  BitTestInfo BTC;
2026
2027  // Figure out which block is immediately after the current one.
2028  MachineFunction::iterator BBI = CR.CaseBB;
2029  ++BBI;
2030
2031  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2032
2033  DEBUG(errs() << "Cases:\n");
2034  for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2035    DEBUG(errs() << "Mask: " << CasesBits[i].Mask
2036                 << ", Bits: " << CasesBits[i].Bits
2037                 << ", BB: " << CasesBits[i].BB << '\n');
2038
2039    MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2040    CurMF->insert(BBI, CaseBB);
2041    BTC.push_back(BitTestCase(CasesBits[i].Mask,
2042                              CaseBB,
2043                              CasesBits[i].BB));
2044
2045    // Put SV in a virtual register to make it available from the new blocks.
2046    ExportFromCurrentBlock(SV);
2047  }
2048
2049  BitTestBlock BTB(lowBound, cmpRange, SV,
2050                   -1U, (CR.CaseBB == CurMBB),
2051                   CR.CaseBB, Default, BTC);
2052
2053  if (CR.CaseBB == CurMBB)
2054    visitBitTestHeader(BTB);
2055
2056  BitTestCases.push_back(BTB);
2057
2058  return true;
2059}
2060
2061/// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
2062size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
2063                                       const SwitchInst& SI) {
2064  size_t numCmps = 0;
2065
2066  // Start with "simple" cases
2067  for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
2068    MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
2069    Cases.push_back(Case(SI.getSuccessorValue(i),
2070                         SI.getSuccessorValue(i),
2071                         SMBB));
2072  }
2073  std::sort(Cases.begin(), Cases.end(), CaseCmp());
2074
2075  // Merge case into clusters
2076  if (Cases.size() >= 2)
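  // Consecutive case values with the same destination collapse into one range,
  // e.g. cases 1, 2 and 3 all branching to the same block become the single
  // range [1, 3].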
2077    // Must recompute end() each iteration because it may be
2078    // invalidated by erase if we hold on to it
2079    for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
2080      const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2081      const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2082      MachineBasicBlock* nextBB = J->BB;
2083      MachineBasicBlock* currentBB = I->BB;
2084
2085      // If the two neighboring cases go to the same destination, merge them
2086      // into a single case.
2087      if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2088        I->High = J->High;
2089        J = Cases.erase(J);
2090      } else {
2091        I = J++;
2092      }
2093    }
2094
2095  for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2096    if (I->Low != I->High)
2097      // A range counts double, since it requires two compares.
2098      ++numCmps;
2099  }
2100
2101  return numCmps;
2102}
2103
2104void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
2105  // Figure out which block is immediately after the current one.
2106  MachineBasicBlock *NextBlock = 0;
2107  MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2108
2109  // If there is only the default destination, branch to it if it is not the
2110  // next basic block.  Otherwise, just fall through.
2111  if (SI.getNumOperands() == 2) {
2112    // Update machine-CFG edges.
2113
2114    // If this is not a fall-through branch, emit the branch.
2115    CurMBB->addSuccessor(Default);
2116    if (Default != NextBlock) {
2117      SDValue Res = DAG.getNode(ISD::BR, getCurDebugLoc(),
2118                                MVT::Other, getControlRoot(),
2119                                DAG.getBasicBlock(Default));
2120      DAG.setRoot(Res);
2121
2122      if (DisableScheduling)
2123        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2124    }
2125
2126    return;
2127  }
2128
2129  // If there are any non-default case statements, create a vector of Cases
2130  // representing each one, and sort the vector so that we can efficiently
2131  // create a binary search tree from them.
2132  CaseVector Cases;
2133  size_t numCmps = Clusterify(Cases, SI);
2134  DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2135               << ". Total compares: " << numCmps << '\n');
2136  numCmps = 0;
2137
2138  // Get the Value to be switched on and default basic blocks, which will be
2139  // inserted into CaseBlock records, representing basic blocks in the binary
2140  // search tree.
2141  Value *SV = SI.getOperand(0);
2142
2143  // Push the initial CaseRec onto the worklist
2144  CaseRecVector WorkList;
2145  WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2146
2147  while (!WorkList.empty()) {
2148    // Grab a record representing a case range to process off the worklist
2149    CaseRec CR = WorkList.back();
2150    WorkList.pop_back();
2151
2152    if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2153      continue;
2154
2155    // If the range has few cases (three or fewer), emit a series of specific
2156    // tests.
2157    if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2158      continue;
2159
2160    // If the range has at least four case values, is at least 40% dense, and
2161    // the target supports jump tables or indirect branches, emit a jump table
2162    // rather than lowering the switch to a binary tree of conditional branches.
2163    if (handleJTSwitchCase(CR, WorkList, SV, Default))
2164      continue;
2165
2166    // Emit binary tree. We need to pick a pivot, and push left and right ranges
2167    // onto the worklist. Leaves are handled via a handleSmallSwitchRange() call.
2168    handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2169  }
2170}
2171
2172void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) {
2173  // Update machine-CFG edges.
2174  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i)
2175    CurMBB->addSuccessor(FuncInfo.MBBMap[I.getSuccessor(i)]);
2176
2177  SDValue Res = DAG.getNode(ISD::BRIND, getCurDebugLoc(),
2178                            MVT::Other, getControlRoot(),
2179                            getValue(I.getAddress()));
2180  DAG.setRoot(Res);
2181
2182  if (DisableScheduling)
2183    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2184}
2185
2186void SelectionDAGBuilder::visitFSub(User &I) {
2187  // -0.0 - X --> fneg
2188  const Type *Ty = I.getType();
2189  if (isa<VectorType>(Ty)) {
2190    if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2191      const VectorType *DestTy = cast<VectorType>(I.getType());
2192      const Type *ElTy = DestTy->getElementType();
2193      unsigned VL = DestTy->getNumElements();
2194      std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2195      Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2196      if (CV == CNZ) {
2197        SDValue Op2 = getValue(I.getOperand(1));
2198        SDValue Res = DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2199                                  Op2.getValueType(), Op2);
2200        setValue(&I, Res);
2201
2202        if (DisableScheduling)
2203          DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2204
2205        return;
2206      }
2207    }
2208  }
2209
2210  if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2211    if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2212      SDValue Op2 = getValue(I.getOperand(1));
2213      SDValue Res = DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2214                                Op2.getValueType(), Op2);
2215      setValue(&I, Res);
2216
2217      if (DisableScheduling)
2218        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2219
2220      return;
2221    }
2222
2223  visitBinary(I, ISD::FSUB);
2224}
2225
2226void SelectionDAGBuilder::visitBinary(User &I, unsigned OpCode) {
2227  SDValue Op1 = getValue(I.getOperand(0));
2228  SDValue Op2 = getValue(I.getOperand(1));
2229  SDValue Res = DAG.getNode(OpCode, getCurDebugLoc(),
2230                            Op1.getValueType(), Op1, Op2);
2231  setValue(&I, Res);
2232
2233  if (DisableScheduling)
2234    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2235}
2236
2237void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) {
2238  SDValue Op1 = getValue(I.getOperand(0));
2239  SDValue Op2 = getValue(I.getOperand(1));
2240  if (!isa<VectorType>(I.getType()) &&
2241      Op2.getValueType() != TLI.getShiftAmountTy()) {
2242    // If the operand is smaller than the shift count type, promote it.
2243    EVT PTy = TLI.getPointerTy();
2244    EVT STy = TLI.getShiftAmountTy();
2245    if (STy.bitsGT(Op2.getValueType()))
2246      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2247                        TLI.getShiftAmountTy(), Op2);
2248    // If the operand is larger than the shift count type but the shift
2249    // count type has enough bits to represent any shift value, truncate
2250    // it now. This is a common case and it exposes the truncate to
2251    // optimization early.
2252    else if (STy.getSizeInBits() >=
2253             Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2254      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2255                        TLI.getShiftAmountTy(), Op2);
2256    // Otherwise we'll need to temporarily settle for some other
2257    // convenient type; type legalization will make adjustments as
2258    // needed.
2259    else if (PTy.bitsLT(Op2.getValueType()))
2260      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2261                        TLI.getPointerTy(), Op2);
2262    else if (PTy.bitsGT(Op2.getValueType()))
2263      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2264                        TLI.getPointerTy(), Op2);
2265  }
2266
2267  SDValue Res = DAG.getNode(Opcode, getCurDebugLoc(),
2268                            Op1.getValueType(), Op1, Op2);
2269  setValue(&I, Res);
2270
2271  if (DisableScheduling) {
2272    DAG.AssignOrdering(Op1.getNode(), SDNodeOrder);
2273    DAG.AssignOrdering(Op2.getNode(), SDNodeOrder);
2274    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2275  }
2276}
2277
2278void SelectionDAGBuilder::visitICmp(User &I) {
2279  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2280  if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2281    predicate = IC->getPredicate();
2282  else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2283    predicate = ICmpInst::Predicate(IC->getPredicate());
2284  SDValue Op1 = getValue(I.getOperand(0));
2285  SDValue Op2 = getValue(I.getOperand(1));
2286  ISD::CondCode Opcode = getICmpCondCode(predicate);
2287
2288  EVT DestVT = TLI.getValueType(I.getType());
2289  SDValue Res = DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode);
2290  setValue(&I, Res);
2291
2292  if (DisableScheduling)
2293    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2294}
2295
2296void SelectionDAGBuilder::visitFCmp(User &I) {
2297  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2298  if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2299    predicate = FC->getPredicate();
2300  else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2301    predicate = FCmpInst::Predicate(FC->getPredicate());
2302  SDValue Op1 = getValue(I.getOperand(0));
2303  SDValue Op2 = getValue(I.getOperand(1));
2304  ISD::CondCode Condition = getFCmpCondCode(predicate);
2305  EVT DestVT = TLI.getValueType(I.getType());
2306  SDValue Res = DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition);
2307  setValue(&I, Res);
2308
2309  if (DisableScheduling)
2310    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2311}
2312
2313void SelectionDAGBuilder::visitSelect(User &I) {
2314  SmallVector<EVT, 4> ValueVTs;
2315  ComputeValueVTs(TLI, I.getType(), ValueVTs);
2316  unsigned NumValues = ValueVTs.size();
2317  if (NumValues == 0) return;
2318
2319  SmallVector<SDValue, 4> Values(NumValues);
2320  SDValue Cond     = getValue(I.getOperand(0));
2321  SDValue TrueVal  = getValue(I.getOperand(1));
2322  SDValue FalseVal = getValue(I.getOperand(2));
2323
2324  for (unsigned i = 0; i != NumValues; ++i) {
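  // A select on an aggregate is lowered to one ISD::SELECT per member value,
  // all sharing the same condition, and the pieces are merged back together
  // below.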
2325    Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2326                            TrueVal.getNode()->getValueType(i), Cond,
2327                            SDValue(TrueVal.getNode(),
2328                                    TrueVal.getResNo() + i),
2329                            SDValue(FalseVal.getNode(),
2330                                    FalseVal.getResNo() + i));
2331
2332    if (DisableScheduling)
2333      DAG.AssignOrdering(Values[i].getNode(), SDNodeOrder);
2334  }
2335
2336  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2337                            DAG.getVTList(&ValueVTs[0], NumValues),
2338                            &Values[0], NumValues);
2339  setValue(&I, Res);
2340
2341  if (DisableScheduling)
2342    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2343}
2344
2345void SelectionDAGBuilder::visitTrunc(User &I) {
2346  // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2347  SDValue N = getValue(I.getOperand(0));
2348  EVT DestVT = TLI.getValueType(I.getType());
2349  SDValue Res = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2350  setValue(&I, Res);
2351
2352  if (DisableScheduling)
2353    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2354}
2355
2356void SelectionDAGBuilder::visitZExt(User &I) {
2357  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2358  // ZExt also can't be a cast to bool for the same reason. So, nothing much to do
2359  SDValue N = getValue(I.getOperand(0));
2360  EVT DestVT = TLI.getValueType(I.getType());
2361  SDValue Res = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
2362  setValue(&I, Res);
2363
2364  if (DisableScheduling)
2365    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2366}
2367
2368void SelectionDAGBuilder::visitSExt(User &I) {
2369  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2370  // SExt also can't be a cast to bool for the same reason. So, nothing much to do
2371  SDValue N = getValue(I.getOperand(0));
2372  EVT DestVT = TLI.getValueType(I.getType());
2373  SDValue Res = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N);
2374  setValue(&I, Res);
2375
2376  if (DisableScheduling)
2377    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2378}
2379
2380void SelectionDAGBuilder::visitFPTrunc(User &I) {
2381  // FPTrunc is never a no-op cast, no need to check
2382  SDValue N = getValue(I.getOperand(0));
2383  EVT DestVT = TLI.getValueType(I.getType());
2384  SDValue Res = DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2385                            DestVT, N, DAG.getIntPtrConstant(0));
2386  setValue(&I, Res);
2387
2388  if (DisableScheduling)
2389    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2390}
2391
2392void SelectionDAGBuilder::visitFPExt(User &I){
2393  // FPExt is never a no-op cast, no need to check
2394  SDValue N = getValue(I.getOperand(0));
2395  EVT DestVT = TLI.getValueType(I.getType());
2396  SDValue Res = DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N);
2397  setValue(&I, Res);
2398
2399  if (DisableScheduling)
2400    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2401}
2402
2403void SelectionDAGBuilder::visitFPToUI(User &I) {
2404  // FPToUI is never a no-op cast, no need to check
2405  SDValue N = getValue(I.getOperand(0));
2406  EVT DestVT = TLI.getValueType(I.getType());
2407  SDValue Res = DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N);
2408  setValue(&I, Res);
2409
2410  if (DisableScheduling)
2411    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2412}
2413
2414void SelectionDAGBuilder::visitFPToSI(User &I) {
2415  // FPToSI is never a no-op cast, no need to check
2416  SDValue N = getValue(I.getOperand(0));
2417  EVT DestVT = TLI.getValueType(I.getType());
2418  SDValue Res = DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N);
2419  setValue(&I, Res);
2420
2421  if (DisableScheduling)
2422    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2423}
2424
2425void SelectionDAGBuilder::visitUIToFP(User &I) {
2426  // UIToFP is never a no-op cast, no need to check
2427  SDValue N = getValue(I.getOperand(0));
2428  EVT DestVT = TLI.getValueType(I.getType());
2429  SDValue Res = DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N);
2430  setValue(&I, Res);
2431
2432  if (DisableScheduling)
2433    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2434}
2435
2436void SelectionDAGBuilder::visitSIToFP(User &I){
2437  // SIToFP is never a no-op cast, no need to check
2438  SDValue N = getValue(I.getOperand(0));
2439  EVT DestVT = TLI.getValueType(I.getType());
2440  SDValue Res = DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N);
2441  setValue(&I, Res);
2442
2443  if (DisableScheduling)
2444    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2445}
2446
2447void SelectionDAGBuilder::visitPtrToInt(User &I) {
2448  // What to do depends on the size of the integer and the size of the pointer.
2449  // We can either truncate, zero extend, or no-op, accordingly.
2450  SDValue N = getValue(I.getOperand(0));
2451  EVT SrcVT = N.getValueType();
2452  EVT DestVT = TLI.getValueType(I.getType());
2453  SDValue Res = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT);
2454  setValue(&I, Res);
2455
2456  if (DisableScheduling)
2457    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2458}
2459
2460void SelectionDAGBuilder::visitIntToPtr(User &I) {
2461  // What to do depends on the size of the integer and the size of the pointer.
2462  // We can either truncate, zero extend, or no-op, accordingly.
2463  SDValue N = getValue(I.getOperand(0));
2464  EVT SrcVT = N.getValueType();
2465  EVT DestVT = TLI.getValueType(I.getType());
2466  SDValue Res = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT);
2467  setValue(&I, Res);
2468
2469  if (DisableScheduling)
2470    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2471}
2472
2473void SelectionDAGBuilder::visitBitCast(User &I) {
2474  SDValue N = getValue(I.getOperand(0));
2475  EVT DestVT = TLI.getValueType(I.getType());
2476
2477  // BitCast assures us that the source and destination are the same size, so
2478  // this is either a BIT_CONVERT or a no-op.
2479  if (DestVT != N.getValueType()) {
2480    SDValue Res = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2481                              DestVT, N); // convert types.
2482    setValue(&I, Res);
2483
2484    if (DisableScheduling)
2485      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2486  } else {
2487    setValue(&I, N);            // noop cast.
2488  }
2489}
2490
2491void SelectionDAGBuilder::visitInsertElement(User &I) {
2492  SDValue InVec = getValue(I.getOperand(0));
2493  SDValue InVal = getValue(I.getOperand(1));
2494  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2495                              TLI.getPointerTy(),
2496                              getValue(I.getOperand(2)));
2497  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2498                            TLI.getValueType(I.getType()),
2499                            InVec, InVal, InIdx);
2500  setValue(&I, Res);
2501
2502  if (DisableScheduling) {
2503    DAG.AssignOrdering(InIdx.getNode(), SDNodeOrder);
2504    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2505  }
2506}
2507
2508void SelectionDAGBuilder::visitExtractElement(User &I) {
2509  SDValue InVec = getValue(I.getOperand(0));
2510  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2511                              TLI.getPointerTy(),
2512                              getValue(I.getOperand(1)));
2513  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2514                            TLI.getValueType(I.getType()), InVec, InIdx);
2515  setValue(&I, Res);
2516
2517  if (DisableScheduling) {
2518    DAG.AssignOrdering(InIdx.getNode(), SDNodeOrder);
2519    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2520  }
2521}
2522
2523
2524// Utility for visitShuffleVector - Returns true if the mask is a sequential mask
2525// starting from SIndx and increasing to the element length (undefs are allowed).
2526static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2527  unsigned MaskNumElts = Mask.size();
2528  for (unsigned i = 0; i != MaskNumElts; ++i)
2529    if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2530      return false;
2531  return true;
2532}
2533
2534void SelectionDAGBuilder::visitShuffleVector(User &I) {
2535  SmallVector<int, 8> Mask;
2536  SDValue Src1 = getValue(I.getOperand(0));
2537  SDValue Src2 = getValue(I.getOperand(1));
2538
2539  // Convert the ConstantVector mask operand into an array of ints, with -1
2540  // representing undef values.
2541  SmallVector<Constant*, 8> MaskElts;
2542  cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(),
2543                                                     MaskElts);
2544  unsigned MaskNumElts = MaskElts.size();
2545  for (unsigned i = 0; i != MaskNumElts; ++i) {
2546    if (isa<UndefValue>(MaskElts[i]))
2547      Mask.push_back(-1);
2548    else
2549      Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2550  }
2551
2552  EVT VT = TLI.getValueType(I.getType());
2553  EVT SrcVT = Src1.getValueType();
2554  unsigned SrcNumElts = SrcVT.getVectorNumElements();
2555
2556  if (SrcNumElts == MaskNumElts) {
2557    SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2558                                       &Mask[0]);
2559    setValue(&I, Res);
2560
2561    if (DisableScheduling)
2562      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2563
2564    return;
2565  }
2566
2567  // Normalize the shuffle vector since mask and vector length don't match.
2568  if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2569    // The mask is longer than the source vectors, and its length is a multiple
2570    // of the source vector length.  We can use CONCAT_VECTORS to make the
2571    // vector lengths match the mask length.
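    // For example, shuffling two <2 x i32> sources with the mask <0, 1, 2, 3>
    // is exactly the concatenation of the two sources.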
2572    if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2573      // The shuffle is concatenating two vectors together.
2574      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2575                                VT, Src1, Src2);
2576      setValue(&I, Res);
2577
2578      if (DisableScheduling)
2579        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2580
2581      return;
2582    }
2583
2584    // Pad both vectors with undefs to make them the same length as the mask.
2585    unsigned NumConcat = MaskNumElts / SrcNumElts;
2586    bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2587    bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2588    SDValue UndefVal = DAG.getUNDEF(SrcVT);
2589
2590    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2591    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2592    MOps1[0] = Src1;
2593    MOps2[0] = Src2;
2594
2595    Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2596                                                  getCurDebugLoc(), VT,
2597                                                  &MOps1[0], NumConcat);
2598    Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2599                                                  getCurDebugLoc(), VT,
2600                                                  &MOps2[0], NumConcat);
2601
2602    // Readjust mask for new input vector length.
2603    SmallVector<int, 8> MappedOps;
2604    for (unsigned i = 0; i != MaskNumElts; ++i) {
2605      int Idx = Mask[i];
2606      if (Idx < (int)SrcNumElts)
2607        MappedOps.push_back(Idx);
2608      else
2609        MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2610    }
2611
2612    SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2613                                       &MappedOps[0]);
2614    setValue(&I, Res);
2615
2616    if (DisableScheduling) {
2617      DAG.AssignOrdering(Src1.getNode(), SDNodeOrder);
2618      DAG.AssignOrdering(Src2.getNode(), SDNodeOrder);
2619      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2620    }
2621
2622    return;
2623  }
2624
2625  if (SrcNumElts > MaskNumElts) {
2626    // Analyze the access pattern of the vector to see if we can extract
2627    // two subvectors and do the shuffle. The analysis is done by calculating
2628    // the range of elements the mask accesses on both input vectors.
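    // For example, a 4-element mask that only uses elements 4-7 of a
    // 16-element source is lowered as an EXTRACT_SUBVECTOR at index 4
    // followed by a narrower shuffle.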
2629    int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2630    int MaxRange[2] = {-1, -1};
2631
2632    for (unsigned i = 0; i != MaskNumElts; ++i) {
2633      int Idx = Mask[i];
2634      int Input = 0;
2635      if (Idx < 0)
2636        continue;
2637
2638      if (Idx >= (int)SrcNumElts) {
2639        Input = 1;
2640        Idx -= SrcNumElts;
2641      }
2642      if (Idx > MaxRange[Input])
2643        MaxRange[Input] = Idx;
2644      if (Idx < MinRange[Input])
2645        MinRange[Input] = Idx;
2646    }
2647
2648    // Check whether the access is smaller than the vector size, and whether we
2649    // can find a reasonable extract index.
2650    int RangeUse[2] = { 2, 2 };  // 0 = Unused, 1 = Extract, 2 = Can not Extract.
2651    int StartIdx[2];  // StartIdx to extract from
2652    for (int Input=0; Input < 2; ++Input) {
2653      if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2654        RangeUse[Input] = 0; // Unused
2655        StartIdx[Input] = 0;
2656      } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2657        // Fits within range but we should see if we can find a good
2658        // start index that is a multiple of the mask length.
2659        if (MaxRange[Input] < (int)MaskNumElts) {
2660          RangeUse[Input] = 1; // Extract from beginning of the vector
2661          StartIdx[Input] = 0;
2662        } else {
2663          StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2664          if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2665              StartIdx[Input] + MaskNumElts < SrcNumElts)
2666            RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2667        }
2668      }
2669    }
2670
2671    if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2672      SDValue Res = DAG.getUNDEF(VT);
2673      setValue(&I, Res);  // Vectors are not used.
2674
2675      if (DisableScheduling)
2676        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2677
2678      return;
2679    }
2680    else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2681      // Extract appropriate subvector and generate a vector shuffle
2682      for (int Input=0; Input < 2; ++Input) {
2683        SDValue &Src = Input == 0 ? Src1 : Src2;
2684        if (RangeUse[Input] == 0)
2685          Src = DAG.getUNDEF(VT);
2686        else
2687          Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2688                            Src, DAG.getIntPtrConstant(StartIdx[Input]));
2689
2690        if (DisableScheduling)
2691          DAG.AssignOrdering(Src.getNode(), SDNodeOrder);
2692      }
2693
2694      // Calculate new mask.
2695      SmallVector<int, 8> MappedOps;
2696      for (unsigned i = 0; i != MaskNumElts; ++i) {
2697        int Idx = Mask[i];
2698        if (Idx < 0)
2699          MappedOps.push_back(Idx);
2700        else if (Idx < (int)SrcNumElts)
2701          MappedOps.push_back(Idx - StartIdx[0]);
2702        else
2703          MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2704      }
2705
2706      SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2707                                         &MappedOps[0]);
2708      setValue(&I, Res);
2709
2710      if (DisableScheduling)
2711        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2712
2713      return;
2714    }
2715  }
2716
2717  // We can't use either concat vectors or extract subvectors, so fall back to
2718  // replacing the shuffle with a sequence of EXTRACT_VECTOR_ELT nodes feeding
2719  // a BUILD_VECTOR.
2720  EVT EltVT = VT.getVectorElementType();
2721  EVT PtrVT = TLI.getPointerTy();
2722  SmallVector<SDValue,8> Ops;
2723  for (unsigned i = 0; i != MaskNumElts; ++i) {
2724    if (Mask[i] < 0) {
2725      Ops.push_back(DAG.getUNDEF(EltVT));
2726    } else {
2727      int Idx = Mask[i];
2728      SDValue Res;
2729
2730      if (Idx < (int)SrcNumElts)
2731        Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2732                          EltVT, Src1, DAG.getConstant(Idx, PtrVT));
2733      else
2734        Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2735                          EltVT, Src2,
2736                          DAG.getConstant(Idx - SrcNumElts, PtrVT));
2737
2738      Ops.push_back(Res);
2739
2740      if (DisableScheduling)
2741        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2742    }
2743  }
2744
2745  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2746                            VT, &Ops[0], Ops.size());
2747  setValue(&I, Res);
2748
2749  if (DisableScheduling)
2750    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2751}
2752
2753void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) {
2754  const Value *Op0 = I.getOperand(0);
2755  const Value *Op1 = I.getOperand(1);
2756  const Type *AggTy = I.getType();
2757  const Type *ValTy = Op1->getType();
2758  bool IntoUndef = isa<UndefValue>(Op0);
2759  bool FromUndef = isa<UndefValue>(Op1);
2760
2761  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2762                                            I.idx_begin(), I.idx_end());
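  // LinearIndex is the position of the inserted member within the flattened
  // list of scalar values that make up the aggregate.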
2763
2764  SmallVector<EVT, 4> AggValueVTs;
2765  ComputeValueVTs(TLI, AggTy, AggValueVTs);
2766  SmallVector<EVT, 4> ValValueVTs;
2767  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2768
2769  unsigned NumAggValues = AggValueVTs.size();
2770  unsigned NumValValues = ValValueVTs.size();
2771  SmallVector<SDValue, 4> Values(NumAggValues);
2772
2773  SDValue Agg = getValue(Op0);
2774  SDValue Val = getValue(Op1);
2775  unsigned i = 0;
2776  // Copy the beginning value(s) from the original aggregate.
2777  for (; i != LinearIndex; ++i)
2778    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2779                SDValue(Agg.getNode(), Agg.getResNo() + i);
2780  // Copy values from the inserted value(s).
2781  for (; i != LinearIndex + NumValValues; ++i)
2782    Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2783                SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2784  // Copy remaining value(s) from the original aggregate.
2785  for (; i != NumAggValues; ++i)
2786    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2787                SDValue(Agg.getNode(), Agg.getResNo() + i);
2788
2789  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2790                            DAG.getVTList(&AggValueVTs[0], NumAggValues),
2791                            &Values[0], NumAggValues);
2792  setValue(&I, Res);
2793
2794  if (DisableScheduling)
2795    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2796}
2797
2798void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) {
2799  const Value *Op0 = I.getOperand(0);
2800  const Type *AggTy = Op0->getType();
2801  const Type *ValTy = I.getType();
2802  bool OutOfUndef = isa<UndefValue>(Op0);
2803
2804  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2805                                            I.idx_begin(), I.idx_end());
2806
2807  SmallVector<EVT, 4> ValValueVTs;
2808  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2809
2810  unsigned NumValValues = ValValueVTs.size();
2811  SmallVector<SDValue, 4> Values(NumValValues);
2812
2813  SDValue Agg = getValue(Op0);
2814  // Copy out the selected value(s).
2815  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2816    Values[i - LinearIndex] =
2817      OutOfUndef ?
2818        DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2819        SDValue(Agg.getNode(), Agg.getResNo() + i);
2820
2821  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2822                            DAG.getVTList(&ValValueVTs[0], NumValValues),
2823                            &Values[0], NumValValues);
2824  setValue(&I, Res);
2825
2826  if (DisableScheduling)
2827    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2828}
2829
2830void SelectionDAGBuilder::visitGetElementPtr(User &I) {
2831  SDValue N = getValue(I.getOperand(0));
2832  const Type *Ty = I.getOperand(0)->getType();
2833
2834  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2835       OI != E; ++OI) {
2836    Value *Idx = *OI;
2837    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2838      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2839      if (Field) {
2840        // N = N + Offset
2841        uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2842        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2843                        DAG.getIntPtrConstant(Offset));
2844
2845        if (DisableScheduling)
2846          DAG.AssignOrdering(N.getNode(), SDNodeOrder);
2847      }
2848
2849      Ty = StTy->getElementType(Field);
2850    } else {
2851      Ty = cast<SequentialType>(Ty)->getElementType();
2852
2853      // If this is a constant subscript, handle it quickly.
2854      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2855        if (CI->getZExtValue() == 0) continue;
2856        uint64_t Offs =
2857            TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2858        SDValue OffsVal;
2859        EVT PTy = TLI.getPointerTy();
2860        unsigned PtrBits = PTy.getSizeInBits();
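        // The offset constant is built as an i64; truncate it when pointers
        // are narrower than 64 bits so it matches the pointer type.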
2861        if (PtrBits < 64)
2862          OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2863                                TLI.getPointerTy(),
2864                                DAG.getConstant(Offs, MVT::i64));
2865        else
2866          OffsVal = DAG.getIntPtrConstant(Offs);
2867
2868        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2869                        OffsVal);
2870
2871        if (DisableScheduling) {
2872          DAG.AssignOrdering(OffsVal.getNode(), SDNodeOrder);
2873          DAG.AssignOrdering(N.getNode(), SDNodeOrder);
2874        }
2875
2876        continue;
2877      }
2878
2879      // N = N + Idx * ElementSize;
2880      APInt ElementSize = APInt(TLI.getPointerTy().getSizeInBits(),
2881                                TD->getTypeAllocSize(Ty));
2882      SDValue IdxN = getValue(Idx);
2883
2884      // If the index is smaller or larger than intptr_t, truncate or extend
2885      // it.
2886      IdxN = DAG.getSExtOrTrunc(IdxN, getCurDebugLoc(), N.getValueType());
2887
2888      // If this is a multiply by a power of two, turn it into a shl
2889      // immediately.  This is a very common case.
2890      if (ElementSize != 1) {
2891        if (ElementSize.isPowerOf2()) {
2892          unsigned Amt = ElementSize.logBase2();
2893          IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2894                             N.getValueType(), IdxN,
2895                             DAG.getConstant(Amt, TLI.getPointerTy()));
2896        } else {
2897          SDValue Scale = DAG.getConstant(ElementSize, TLI.getPointerTy());
2898          IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2899                             N.getValueType(), IdxN, Scale);
2900        }
2901
2902        if (DisableScheduling)
2903          DAG.AssignOrdering(IdxN.getNode(), SDNodeOrder);
2904      }
2905
2906      N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2907                      N.getValueType(), N, IdxN);
2908
2909      if (DisableScheduling)
2910        DAG.AssignOrdering(N.getNode(), SDNodeOrder);
2911    }
2912  }
2913
2914  setValue(&I, N);
2915}
2916
2917void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
2918  // If this is a fixed sized alloca in the entry block of the function,
2919  // allocate it statically on the stack.
2920  if (FuncInfo.StaticAllocaMap.count(&I))
2921    return;   // getValue will auto-populate this.
2922
2923  const Type *Ty = I.getAllocatedType();
2924  uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2925  unsigned Align =
2926    std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2927             I.getAlignment());
2928
2929  SDValue AllocSize = getValue(I.getArraySize());
2930
2931  AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2932                          AllocSize,
2933                          DAG.getConstant(TySize, AllocSize.getValueType()));
2934
2935  if (DisableScheduling)
2936    DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder);
2937
2938  EVT IntPtr = TLI.getPointerTy();
2939  AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
2940
2941  if (DisableScheduling)
2942    DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder);
2943
2944  // Handle alignment.  If the requested alignment is less than or equal to
2945  // the stack alignment, ignore it.  If the requested alignment is greater
2946  // than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2947  unsigned StackAlign =
2948    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2949  if (Align <= StackAlign)
2950    Align = 0;
2951
2952  // Round the size of the allocation up to the stack alignment size
2953  // by adding SA-1 to the size.
2954  AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2955                          AllocSize.getValueType(), AllocSize,
2956                          DAG.getIntPtrConstant(StackAlign-1));
2957  if (DisableScheduling)
2958    DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder);
2959
2960  // Mask out the low bits for alignment purposes.
2961  AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2962                          AllocSize.getValueType(), AllocSize,
2963                          DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
2964  if (DisableScheduling)
2965    DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder);
2966
2967  SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2968  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2969  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2970                            VTs, Ops, 3);
2971  setValue(&I, DSA);
2972  DAG.setRoot(DSA.getValue(1));
2973
2974  if (DisableScheduling)
2975    DAG.AssignOrdering(DSA.getNode(), SDNodeOrder);
2976
2977  // Inform the Frame Information that we have just allocated a variable-sized
2978  // object.
2979  FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
2980}
2981
2982void SelectionDAGBuilder::visitLoad(LoadInst &I) {
2983  const Value *SV = I.getOperand(0);
2984  SDValue Ptr = getValue(SV);
2985
2986  const Type *Ty = I.getType();
2987  bool isVolatile = I.isVolatile();
2988  unsigned Alignment = I.getAlignment();
2989
2990  SmallVector<EVT, 4> ValueVTs;
2991  SmallVector<uint64_t, 4> Offsets;
2992  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2993  unsigned NumValues = ValueVTs.size();
2994  if (NumValues == 0)
2995    return;
2996
2997  SDValue Root;
2998  bool ConstantMemory = false;
2999  if (I.isVolatile())
3000    // Serialize volatile loads with other side effects.
3001    Root = getRoot();
3002  else if (AA->pointsToConstantMemory(SV)) {
3003    // Do not serialize (non-volatile) loads of constant memory with anything.
3004    Root = DAG.getEntryNode();
3005    ConstantMemory = true;
3006  } else {
3007    // Do not serialize non-volatile loads against each other.
3008    Root = DAG.getRoot();
3009  }
3010
3011  SmallVector<SDValue, 4> Values(NumValues);
3012  SmallVector<SDValue, 4> Chains(NumValues);
3013  EVT PtrVT = Ptr.getValueType();
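  // Emit one load for each legal value of the type, each addressed at its
  // byte offset from the base pointer.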
3014  for (unsigned i = 0; i != NumValues; ++i) {
3015    SDValue A = DAG.getNode(ISD::ADD, getCurDebugLoc(),
3016                            PtrVT, Ptr,
3017                            DAG.getConstant(Offsets[i], PtrVT));
3018    SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
3019                            A, SV, Offsets[i], isVolatile, Alignment);
3020
3021    Values[i] = L;
3022    Chains[i] = L.getValue(1);
3023
3024    if (DisableScheduling) {
3025      DAG.AssignOrdering(A.getNode(), SDNodeOrder);
3026      DAG.AssignOrdering(L.getNode(), SDNodeOrder);
3027    }
3028  }
3029
3030  if (!ConstantMemory) {
3031    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
3032                                MVT::Other, &Chains[0], NumValues);
3033    if (isVolatile)
3034      DAG.setRoot(Chain);
3035    else
3036      PendingLoads.push_back(Chain);
3037
3038    if (DisableScheduling)
3039      DAG.AssignOrdering(Chain.getNode(), SDNodeOrder);
3040  }
3041
3042  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
3043                            DAG.getVTList(&ValueVTs[0], NumValues),
3044                            &Values[0], NumValues);
3045  setValue(&I, Res);
3046
3047  if (DisableScheduling)
3048    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
3049}
3050
3051void SelectionDAGBuilder::visitStore(StoreInst &I) {
3052  Value *SrcV = I.getOperand(0);
3053  Value *PtrV = I.getOperand(1);
3054
3055  SmallVector<EVT, 4> ValueVTs;
3056  SmallVector<uint64_t, 4> Offsets;
3057  ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
3058  unsigned NumValues = ValueVTs.size();
3059  if (NumValues == 0)
3060    return;
3061
3062  // Get the lowered operands. Note that we do this after
3063  // checking if NumValues is zero, because with zero values
3064  // the operands won't have values in the map.
3065  SDValue Src = getValue(SrcV);
3066  SDValue Ptr = getValue(PtrV);
3067
3068  SDValue Root = getRoot();
3069  SmallVector<SDValue, 4> Chains(NumValues);
3070  EVT PtrVT = Ptr.getValueType();
3071  bool isVolatile = I.isVolatile();
3072  unsigned Alignment = I.getAlignment();
3073
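  // Emit one store for each value being stored, each at its byte offset from
  // the base pointer.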
3074  for (unsigned i = 0; i != NumValues; ++i) {
3075    SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, Ptr,
3076                              DAG.getConstant(Offsets[i], PtrVT));
3077    Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
3078                             SDValue(Src.getNode(), Src.getResNo() + i),
3079                             Add, PtrV, Offsets[i], isVolatile, Alignment);
3080
3081    if (DisableScheduling) {
3082      DAG.AssignOrdering(Add.getNode(), SDNodeOrder);
3083      DAG.AssignOrdering(Chains[i].getNode(), SDNodeOrder);
3084    }
3085  }
3086
3087  SDValue Res = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
3088                            MVT::Other, &Chains[0], NumValues);
3089  DAG.setRoot(Res);
3090
3091  if (DisableScheduling)
3092    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
3093}
3094
3095/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
3096/// node.
3097void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I,
3098                                               unsigned Intrinsic) {
3099  bool HasChain = !I.doesNotAccessMemory();
3100  bool OnlyLoad = HasChain && I.onlyReadsMemory();
3101
3102  // Build the operand list.
3103  SmallVector<SDValue, 8> Ops;
3104  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
3105    if (OnlyLoad) {
3106      // We don't need to serialize loads against other loads.
3107      Ops.push_back(DAG.getRoot());
3108    } else {
3109      Ops.push_back(getRoot());
3110    }
3111  }
3112
3113  // Info is set by getTgtMemIntrinsic.
3114  TargetLowering::IntrinsicInfo Info;
3115  bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
3116
3117  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
3118  if (!IsTgtIntrinsic)
3119    Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
3120
3121  // Add all operands of the call to the operand list.
3122  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
3123    SDValue Op = getValue(I.getOperand(i));
3124    assert(TLI.isTypeLegal(Op.getValueType()) &&
3125           "Intrinsic uses a non-legal type?");
3126    Ops.push_back(Op);
3127  }
3128
3129  SmallVector<EVT, 4> ValueVTs;
3130  ComputeValueVTs(TLI, I.getType(), ValueVTs);
3131#ifndef NDEBUG
3132  for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
3133    assert(TLI.isTypeLegal(ValueVTs[Val]) &&
3134           "Intrinsic uses a non-legal type?");
3135  }
3136#endif // NDEBUG
3137
3138  if (HasChain)
3139    ValueVTs.push_back(MVT::Other);
3140
3141  SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
3142
3143  // Create the node.
3144  SDValue Result;
3145  if (IsTgtIntrinsic) {
3146    // This is a target intrinsic that touches memory.
3147    Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
3148                                     VTs, &Ops[0], Ops.size(),
3149                                     Info.memVT, Info.ptrVal, Info.offset,
3150                                     Info.align, Info.vol,
3151                                     Info.readMem, Info.writeMem);
3152  } else if (!HasChain) {
3153    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
3154                         VTs, &Ops[0], Ops.size());
3155  } else if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
3156    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
3157                         VTs, &Ops[0], Ops.size());
3158  } else {
3159    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
3160                         VTs, &Ops[0], Ops.size());
3161  }
3162
3163  if (DisableScheduling)
3164    DAG.AssignOrdering(Result.getNode(), SDNodeOrder);
3165
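  // For chained intrinsics the chain is the last result value; hook it back
  // into the DAG so later memory operations stay ordered after this node.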
3166  if (HasChain) {
3167    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
3168    if (OnlyLoad)
3169      PendingLoads.push_back(Chain);
3170    else
3171      DAG.setRoot(Chain);
3172  }
3173
3174  if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
3175    if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
3176      EVT VT = TLI.getValueType(PTy);
3177      Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
3178
3179      if (DisableScheduling)
3180        DAG.AssignOrdering(Result.getNode(), SDNodeOrder);
3181    }
3182
3183    setValue(&I, Result);
3184  }
3185}
3186
3187/// GetSignificand - Get the significand and build it into a floating-point
3188/// number with exponent of 1:
3189///
3190///   Op = (Op & 0x007fffff) | 0x3f800000;
3191///
3192/// where Op is the hexadecimal representation of the floating-point value.
3193static SDValue
3194GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl, unsigned Order) {
3195  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3196                           DAG.getConstant(0x007fffff, MVT::i32));
3197  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3198                           DAG.getConstant(0x3f800000, MVT::i32));
3199  SDValue Res = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
3200
3201  if (DisableScheduling) {
3202    DAG.AssignOrdering(t1.getNode(), Order);
3203    DAG.AssignOrdering(t2.getNode(), Order);
3204    DAG.AssignOrdering(Res.getNode(), Order);
3205  }
3206
3207  return Res;
3208}
3209
3210/// GetExponent - Get the exponent:
3211///
3212///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3213///
3214/// where Op is the hexadecimal representation of the floating-point value.
3215static SDValue
3216GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3217            DebugLoc dl, unsigned Order) {
3218  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3219                           DAG.getConstant(0x7f800000, MVT::i32));
3220  SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3221                           DAG.getConstant(23, TLI.getPointerTy()));
3222  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3223                           DAG.getConstant(127, MVT::i32));
3224  SDValue Res = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
3225
3226  if (DisableScheduling) {
3227    DAG.AssignOrdering(t0.getNode(), Order);
3228    DAG.AssignOrdering(t1.getNode(), Order);
3229    DAG.AssignOrdering(t2.getNode(), Order);
3230    DAG.AssignOrdering(Res.getNode(), Order);
3231  }
3232
3233  return Res;
3234}
3235
3236/// getF32Constant - Get 32-bit floating point constant.
3237static SDValue
3238getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3239  return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
3240}
3241
3242/// Inlined utility function to implement binary input atomic intrinsics for
3243/// visitIntrinsicCall: I is a call instruction
3244///                     Op is the associated NodeType for I
3245const char *
3246SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3247  SDValue Root = getRoot();
3248  SDValue L =
3249    DAG.getAtomic(Op, getCurDebugLoc(),
3250                  getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3251                  Root,
3252                  getValue(I.getOperand(1)),
3253                  getValue(I.getOperand(2)),
3254                  I.getOperand(1));
3255  setValue(&I, L);
3256  DAG.setRoot(L.getValue(1));
3257
3258  if (DisableScheduling)
3259    DAG.AssignOrdering(L.getNode(), SDNodeOrder);
3260
3261  return 0;
3262}
3263
3264// implVisitAluOverflow - Lower arithmetic overflow intrinsics.
3265const char *
3266SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3267  SDValue Op1 = getValue(I.getOperand(1));
3268  SDValue Op2 = getValue(I.getOperand(2));
3269
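  // Overflow intrinsics produce two results: the arithmetic value and an i1
  // overflow flag.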
3270  SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
3271  SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
3272
3273  setValue(&I, Result);
3274
3275  if (DisableScheduling)
3276    DAG.AssignOrdering(Result.getNode(), SDNodeOrder);
3277
3278  return 0;
3279}
3280
3281/// visitExp - Lower an exp intrinsic. Handles the special sequences for
3282/// limited-precision mode.
3283void
3284SelectionDAGBuilder::visitExp(CallInst &I) {
3285  SDValue result;
3286  DebugLoc dl = getCurDebugLoc();
3287
3288  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3289      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3290    SDValue Op = getValue(I.getOperand(1));
3291
3292    // Put the exponent in the right bit position for later addition to the
3293    // final result:
3294    //
3295    //   #define LOG2OFe 1.4426950f
3296    //   IntegerPartOfX = ((int32_t)(X * LOG2OFe));
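    //
    // exp(X) = 2^(X * log2(e)), so the integer part of X * log2(e) can be
    // added straight into the exponent field of the final float.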
3297    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3298                             getF32Constant(DAG, 0x3fb8aa3b));
3299    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3300
3301    //   FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3302    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3303    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3304
3305    if (DisableScheduling) {
3306      DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3307      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
3308      DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3309      DAG.AssignOrdering(X.getNode(), SDNodeOrder);
3310    }
3311
3312    //   IntegerPartOfX <<= 23;
3313    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3314                                 DAG.getConstant(23, TLI.getPointerTy()));
3315
3316    if (DisableScheduling)
3317      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
3318
3319    if (LimitFloatPrecision <= 6) {
3320      // For floating-point precision of 6:
3321      //
3322      //   TwoToFractionalPartOfX =
3323      //     0.997535578f +
3324      //       (0.735607626f + 0.252464424f * x) * x;
3325      //
3326      // error 0.0144103317, which is 6 bits
3327      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3328                               getF32Constant(DAG, 0x3e814304));
3329      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3330                               getF32Constant(DAG, 0x3f3c50c8));
3331      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3332      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3333                               getF32Constant(DAG, 0x3f7f5e7e));
3334      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3335
3336      // Add the exponent into the result in integer domain.
3337      SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3338                               TwoToFracPartOfX, IntegerPartOfX);
3339
3340      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3341
3342      if (DisableScheduling) {
3343        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3344        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3345        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3346        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3347        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3348        DAG.AssignOrdering(TwoToFracPartOfX.getNode(), SDNodeOrder);
3349        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3350      }
3351    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3352      // For floating-point precision of 12:
3353      //
3354      //   TwoToFractionalPartOfX =
3355      //     0.999892986f +
3356      //       (0.696457318f +
3357      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3358      //
3359      // 0.000107046256 error, which is 13 to 14 bits
3360      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3361                               getF32Constant(DAG, 0x3da235e3));
3362      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3363                               getF32Constant(DAG, 0x3e65b8f3));
3364      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3365      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3366                               getF32Constant(DAG, 0x3f324b07));
3367      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3368      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3369                               getF32Constant(DAG, 0x3f7ff8fd));
3370      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3371
3372      // Add the exponent into the result in integer domain.
3373      SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3374                               TwoToFracPartOfX, IntegerPartOfX);
3375
3376      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3377
3378      if (DisableScheduling) {
3379        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3380        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3381        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3382        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3383        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3384        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3385        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3386        DAG.AssignOrdering(TwoToFracPartOfX.getNode(), SDNodeOrder);
3387        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3388      }
3389    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3390      // For floating-point precision of 18:
3391      //
3392      //   TwoToFractionalPartOfX =
3393      //     0.999999982f +
3394      //       (0.693148872f +
3395      //         (0.240227044f +
3396      //           (0.554906021e-1f +
3397      //             (0.961591928e-2f +
3398      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3399      //
3400      // error 2.47208000*10^(-7), which is better than 18 bits
3401      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3402                               getF32Constant(DAG, 0x3924b03e));
3403      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3404                               getF32Constant(DAG, 0x3ab24b87));
3405      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3406      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3407                               getF32Constant(DAG, 0x3c1d8c17));
3408      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3409      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3410                               getF32Constant(DAG, 0x3d634a1d));
3411      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3412      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3413                               getF32Constant(DAG, 0x3e75fe14));
3414      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3415      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3416                                getF32Constant(DAG, 0x3f317234));
3417      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3418      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3419                                getF32Constant(DAG, 0x3f800000));
3420      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3421                                             MVT::i32, t13);
3422
3423      // Add the exponent into the result in integer domain.
3424      SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3425                                TwoToFracPartOfX, IntegerPartOfX);
3426
3427      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3428
3429      if (DisableScheduling) {
3430        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3431        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3432        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3433        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3434        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3435        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3436        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3437        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
3438        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
3439        DAG.AssignOrdering(t11.getNode(), SDNodeOrder);
3440        DAG.AssignOrdering(t12.getNode(), SDNodeOrder);
3441        DAG.AssignOrdering(t13.getNode(), SDNodeOrder);
3442        DAG.AssignOrdering(t14.getNode(), SDNodeOrder);
3443        DAG.AssignOrdering(TwoToFracPartOfX.getNode(), SDNodeOrder);
3444        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3445      }
3446    }
3447  } else {
3448    // No special expansion.
3449    result = DAG.getNode(ISD::FEXP, dl,
3450                         getValue(I.getOperand(1)).getValueType(),
3451                         getValue(I.getOperand(1)));
3452    if (DisableScheduling)
3453      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3454  }
3455
3456  setValue(&I, result);
3457}
3458
3459/// visitLog - Lower a log intrinsic. Handles the special sequences for
3460/// limited-precision mode.
3461void
3462SelectionDAGBuilder::visitLog(CallInst &I) {
3463  SDValue result;
3464  DebugLoc dl = getCurDebugLoc();
3465
3466  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3467      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3468    SDValue Op = getValue(I.getOperand(1));
3469    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3470
3471    if (DisableScheduling)
3472      DAG.AssignOrdering(Op1.getNode(), SDNodeOrder);
3473
3474    // Scale the exponent by log(2) [0.69314718f].
3475    SDValue Exp = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder);
3476    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3477                                        getF32Constant(DAG, 0x3f317218));
3478
3479    if (DisableScheduling)
3480      DAG.AssignOrdering(LogOfExponent.getNode(), SDNodeOrder);
3481
3482    // Get the significand and build it into a floating-point number with
3483    // exponent of 1.
3484    SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder);
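    // log(x) = log(2^E * M) = E*log(2) + log(M); LogOfExponent holds the
    // first term and the polynomials below approximate log(M) over [1,2).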
3485
3486    if (LimitFloatPrecision <= 6) {
3487      // For floating-point precision of 6:
3488      //
3489      //   LogofMantissa =
3490      //     -1.1609546f +
3491      //       (1.4034025f - 0.23903021f * x) * x;
3492      //
3493      // error 0.0034276066, which is better than 8 bits
3494      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3495                               getF32Constant(DAG, 0xbe74c456));
3496      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3497                               getF32Constant(DAG, 0x3fb3a2b1));
3498      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3499      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3500                                          getF32Constant(DAG, 0x3f949a29));
3501
3502      result = DAG.getNode(ISD::FADD, dl,
3503                           MVT::f32, LogOfExponent, LogOfMantissa);
3504
3505      if (DisableScheduling) {
3506        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3507        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3508        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3509        DAG.AssignOrdering(LogOfMantissa.getNode(), SDNodeOrder);
3510        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3511      }
3512    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3513      // For floating-point precision of 12:
3514      //
3515      //   LogOfMantissa =
3516      //     -1.7417939f +
3517      //       (2.8212026f +
3518      //         (-1.4699568f +
3519      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3520      //
3521      // error 0.000061011436, which is 14 bits
3522      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3523                               getF32Constant(DAG, 0xbd67b6d6));
3524      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3525                               getF32Constant(DAG, 0x3ee4f4b8));
3526      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3527      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3528                               getF32Constant(DAG, 0x3fbc278b));
3529      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3530      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3531                               getF32Constant(DAG, 0x40348e95));
3532      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3533      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3534                                          getF32Constant(DAG, 0x3fdef31a));
3535
3536      result = DAG.getNode(ISD::FADD, dl,
3537                           MVT::f32, LogOfExponent, LogOfMantissa);
3538
3539      if (DisableScheduling) {
3540        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3541        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3542        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3543        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3544        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3545        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3546        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3547        DAG.AssignOrdering(LogOfMantissa.getNode(), SDNodeOrder);
3548        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3549      }
3550    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3551      // For floating-point precision of 18:
3552      //
3553      //   LogOfMantissa =
3554      //     -2.1072184f +
3555      //       (4.2372794f +
3556      //         (-3.7029485f +
3557      //           (2.2781945f +
3558      //             (-0.87823314f +
3559      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3560      //
3561      // error 0.0000023660568, which is better than 18 bits
3562      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3563                               getF32Constant(DAG, 0xbc91e5ac));
3564      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3565                               getF32Constant(DAG, 0x3e4350aa));
3566      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3567      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3568                               getF32Constant(DAG, 0x3f60d3e3));
3569      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3570      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3571                               getF32Constant(DAG, 0x4011cdf0));
3572      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3573      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3574                               getF32Constant(DAG, 0x406cfd1c));
3575      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3576      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3577                               getF32Constant(DAG, 0x408797cb));
3578      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3579      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3580                                          getF32Constant(DAG, 0x4006dcab));
3581
3582      result = DAG.getNode(ISD::FADD, dl,
3583                           MVT::f32, LogOfExponent, LogOfMantissa);
3584
3585      if (DisableScheduling) {
3586        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3587        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3588        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3589        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3590        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3591        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3592        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3593        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3594        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3595        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
3596        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
3597        DAG.AssignOrdering(LogOfMantissa.getNode(), SDNodeOrder);
3598        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3599      }
3600    }
3601  } else {
3602    // No special expansion.
3603    result = DAG.getNode(ISD::FLOG, dl,
3604                         getValue(I.getOperand(1)).getValueType(),
3605                         getValue(I.getOperand(1)));
3606
3607    if (DisableScheduling)
3608      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3609  }
3610
3611  setValue(&I, result);
3612}
3613
3614/// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3615/// limited-precision mode.
3616void
3617SelectionDAGBuilder::visitLog2(CallInst &I) {
3618  SDValue result;
3619  DebugLoc dl = getCurDebugLoc();
3620
3621  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3622      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3623    SDValue Op = getValue(I.getOperand(1));
3624    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3625
3626    if (DisableScheduling)
3627      DAG.AssignOrdering(Op1.getNode(), SDNodeOrder);
3628
3629    // Get the exponent.
3630    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder);
3631
3632    if (DisableScheduling)
3633      DAG.AssignOrdering(LogOfExponent.getNode(), SDNodeOrder);
3634
3635    // Get the significand and build it into a floating-point number with
3636    // exponent of 1.
3637    SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder);
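    // log2(x) = E + log2(M); the exponent is used directly and the
    // polynomials below approximate log2 of the mantissa over [1,2).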
3638
3639    // Different possible minimax approximations of the significand in
3640    // floating-point for various degrees of accuracy over [1,2].
3641    if (LimitFloatPrecision <= 6) {
3642      // For floating-point precision of 6:
3643      //
3644      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3645      //
3646      // error 0.0049451742, which is more than 7 bits
3647      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3648                               getF32Constant(DAG, 0xbeb08fe0));
3649      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3650                               getF32Constant(DAG, 0x40019463));
3651      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3652      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3653                                           getF32Constant(DAG, 0x3fd6633d));
3654
3655      result = DAG.getNode(ISD::FADD, dl,
3656                           MVT::f32, LogOfExponent, Log2ofMantissa);
3657
3658      if (DisableScheduling) {
3659        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3660        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3661        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3662        DAG.AssignOrdering(Log2ofMantissa.getNode(), SDNodeOrder);
3663        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3664      }
3665    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3666      // For floating-point precision of 12:
3667      //
3668      //   Log2ofMantissa =
3669      //     -2.51285454f +
3670      //       (4.07009056f +
3671      //         (-2.12067489f +
3672      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3673      //
3674      // error 0.0000876136000, which is better than 13 bits
3675      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3676                               getF32Constant(DAG, 0xbda7262e));
3677      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3678                               getF32Constant(DAG, 0x3f25280b));
3679      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3680      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3681                               getF32Constant(DAG, 0x4007b923));
3682      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3683      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3684                               getF32Constant(DAG, 0x40823e2f));
3685      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3686      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3687                                           getF32Constant(DAG, 0x4020d29c));
3688
3689      result = DAG.getNode(ISD::FADD, dl,
3690                           MVT::f32, LogOfExponent, Log2ofMantissa);
3691
3692      if (DisableScheduling) {
3693        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3694        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3695        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3696        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3697        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3698        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3699        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3700        DAG.AssignOrdering(Log2ofMantissa.getNode(), SDNodeOrder);
3701        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3702      }
3703    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3704      // For floating-point precision of 18:
3705      //
3706      //   Log2ofMantissa =
3707      //     -3.0400495f +
3708      //       (6.1129976f +
3709      //         (-5.3420409f +
3710      //           (3.2865683f +
3711      //             (-1.2669343f +
3712      //               (0.27515199f -
3713      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3714      //
3715      // error 0.0000018516, which is better than 18 bits
3716      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3717                               getF32Constant(DAG, 0xbcd2769e));
3718      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3719                               getF32Constant(DAG, 0x3e8ce0b9));
3720      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3721      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3722                               getF32Constant(DAG, 0x3fa22ae7));
3723      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3724      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3725                               getF32Constant(DAG, 0x40525723));
3726      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3727      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3728                               getF32Constant(DAG, 0x40aaf200));
3729      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3730      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3731                               getF32Constant(DAG, 0x40c39dad));
3732      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3733      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3734                                           getF32Constant(DAG, 0x4042902c));
3735
3736      result = DAG.getNode(ISD::FADD, dl,
3737                           MVT::f32, LogOfExponent, Log2ofMantissa);
3738
3739      if (DisableScheduling) {
3740        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3741        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3742        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3743        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3744        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3745        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3746        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3747        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3748        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3749        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
3750        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
3751        DAG.AssignOrdering(Log2ofMantissa.getNode(), SDNodeOrder);
3752        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3753      }
3754    }
3755  } else {
3756    // No special expansion.
3757    result = DAG.getNode(ISD::FLOG2, dl,
3758                         getValue(I.getOperand(1)).getValueType(),
3759                         getValue(I.getOperand(1)));
3760
3761    if (DisableScheduling)
3762      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3763  }
3764
3765  setValue(&I, result);
3766}
3767
3768/// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3769/// limited-precision mode.
3770void
3771SelectionDAGBuilder::visitLog10(CallInst &I) {
3772  SDValue result;
3773  DebugLoc dl = getCurDebugLoc();
3774
3775  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3776      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3777    SDValue Op = getValue(I.getOperand(1));
3778    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3779
3780    if (DisableScheduling)
3781      DAG.AssignOrdering(Op1.getNode(), SDNodeOrder);
3782
3783    // Scale the exponent by log10(2) [0.30102999f].
3784    SDValue Exp = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder);
3785    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3786                                        getF32Constant(DAG, 0x3e9a209a));
3787
3788    if (DisableScheduling)
3789      DAG.AssignOrdering(LogOfExponent.getNode(), SDNodeOrder);
3790
3791    // Get the significand and build it into a floating-point number with
3792    // exponent of 1.
3793    SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder);
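    // log10(x) = E*log10(2) + log10(M); LogOfExponent holds the first term
    // and the polynomials below approximate log10 of the mantissa over [1,2).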
3794
3795    if (LimitFloatPrecision <= 6) {
3796      // For floating-point precision of 6:
3797      //
3798      //   Log10ofMantissa =
3799      //     -0.50419619f +
3800      //       (0.60948995f - 0.10380950f * x) * x;
3801      //
3802      // error 0.0014886165, which is 6 bits
3803      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3804                               getF32Constant(DAG, 0xbdd49a13));
3805      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3806                               getF32Constant(DAG, 0x3f1c0789));
3807      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3808      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3809                                            getF32Constant(DAG, 0x3f011300));
3810
3811      result = DAG.getNode(ISD::FADD, dl,
3812                           MVT::f32, LogOfExponent, Log10ofMantissa);
3813
3814      if (DisableScheduling) {
3815        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3816        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3817        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3818        DAG.AssignOrdering(Log10ofMantissa.getNode(), SDNodeOrder);
3819        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3820      }
3821    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3822      // For floating-point precision of 12:
3823      //
3824      //   Log10ofMantissa =
3825      //     -0.64831180f +
3826      //       (0.91751397f +
3827      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3828      //
3829      // error 0.00019228036, which is better than 12 bits
3830      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3831                               getF32Constant(DAG, 0x3d431f31));
3832      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3833                               getF32Constant(DAG, 0x3ea21fb2));
3834      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3835      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3836                               getF32Constant(DAG, 0x3f6ae232));
3837      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3838      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3839                                            getF32Constant(DAG, 0x3f25f7c3));
3840
3841      result = DAG.getNode(ISD::FADD, dl,
3842                           MVT::f32, LogOfExponent, Log10ofMantissa);
3843
3844      if (DisableScheduling) {
3845        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3846        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3847        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3848        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3849        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3850        DAG.AssignOrdering(Log10ofMantissa.getNode(), SDNodeOrder);
3851        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3852      }
3853    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3854      // For floating-point precision of 18:
3855      //
3856      //   Log10ofMantissa =
3857      //     -0.84299375f +
3858      //       (1.5327582f +
3859      //         (-1.0688956f +
3860      //           (0.49102474f +
3861      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3862      //
3863      // error 0.0000037995730, which is better than 18 bits
3864      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3865                               getF32Constant(DAG, 0x3c5d51ce));
3866      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3867                               getF32Constant(DAG, 0x3e00685a));
3868      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3869      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3870                               getF32Constant(DAG, 0x3efb6798));
3871      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3872      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3873                               getF32Constant(DAG, 0x3f88d192));
3874      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3875      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3876                               getF32Constant(DAG, 0x3fc4316c));
3877      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3878      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3879                                            getF32Constant(DAG, 0x3f57ce70));
3880
3881      result = DAG.getNode(ISD::FADD, dl,
3882                           MVT::f32, LogOfExponent, Log10ofMantissa);
3883
3884      if (DisableScheduling) {
3885        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3886        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3887        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3888        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3889        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3890        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3891        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3892        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3893        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3894        DAG.AssignOrdering(Log10ofMantissa.getNode(), SDNodeOrder);
3895        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3896      }
3897    }
3898  } else {
3899    // No special expansion.
3900    result = DAG.getNode(ISD::FLOG10, dl,
3901                         getValue(I.getOperand(1)).getValueType(),
3902                         getValue(I.getOperand(1)));
3903
3904    if (DisableScheduling)
3905      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3906  }
3907
3908  setValue(&I, result);
3909}
3910
3911/// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3912/// limited-precision mode.
3913void
3914SelectionDAGBuilder::visitExp2(CallInst &I) {
3915  SDValue result;
3916  DebugLoc dl = getCurDebugLoc();
3917
3918  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3919      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3920    SDValue Op = getValue(I.getOperand(1));
3921
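    // Split the input into integer and fractional parts:
    // 2^X = 2^IntegerPartOfX * 2^FractionalPartOfX, and the integer part
    // goes straight into the exponent field of the result.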
3922    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3923
3924    if (DisableScheduling)
3925      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
3926
3927    //   FractionalPartOfX = x - (float)IntegerPartOfX;
3928    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3929    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3930
3931    //   IntegerPartOfX <<= 23;
3932    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3933                                 DAG.getConstant(23, TLI.getPointerTy()));
3934
3935    if (DisableScheduling) {
3936      DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3937      DAG.AssignOrdering(X.getNode(), SDNodeOrder);
3938      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
3939    }
3940
3941    if (LimitFloatPrecision <= 6) {
3942      // For floating-point precision of 6:
3943      //
3944      //   TwoToFractionalPartOfX =
3945      //     0.997535578f +
3946      //       (0.735607626f + 0.252464424f * x) * x;
3947      //
3948      // error 0.0144103317, which is 6 bits
3949      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3950                               getF32Constant(DAG, 0x3e814304));
3951      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3952                               getF32Constant(DAG, 0x3f3c50c8));
3953      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3954      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3955                               getF32Constant(DAG, 0x3f7f5e7e));
3956      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3957      SDValue TwoToFractionalPartOfX =
3958        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3959
3960      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3961                           MVT::f32, TwoToFractionalPartOfX);
3962
3963      if (DisableScheduling) {
3964        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3965        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3966        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3967        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3968        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3969        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
3970        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3971      }
3972    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3973      // For floating-point precision of 12:
3974      //
3975      //   TwoToFractionalPartOfX =
3976      //     0.999892986f +
3977      //       (0.696457318f +
3978      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3979      //
3980      // error 0.000107046256, which is 13 to 14 bits
3981      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3982                               getF32Constant(DAG, 0x3da235e3));
3983      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3984                               getF32Constant(DAG, 0x3e65b8f3));
3985      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3986      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3987                               getF32Constant(DAG, 0x3f324b07));
3988      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3989      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3990                               getF32Constant(DAG, 0x3f7ff8fd));
3991      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3992      SDValue TwoToFractionalPartOfX =
3993        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3994
3995      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3996                           MVT::f32, TwoToFractionalPartOfX);
3997
3998      if (DisableScheduling) {
3999        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4000        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4001        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4002        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4003        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4004        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
4005        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
4006        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4007        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4008      }
4009    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
4010      // For floating-point precision of 18:
4011      //
4012      //   TwoToFractionalPartOfX =
4013      //     0.999999982f +
4014      //       (0.693148872f +
4015      //         (0.240227044f +
4016      //           (0.554906021e-1f +
4017      //             (0.961591928e-2f +
4018      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4019      // error 2.47208000*10^(-7), which is better than 18 bits
4020      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4021                               getF32Constant(DAG, 0x3924b03e));
4022      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4023                               getF32Constant(DAG, 0x3ab24b87));
4024      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4025      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4026                               getF32Constant(DAG, 0x3c1d8c17));
4027      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4028      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4029                               getF32Constant(DAG, 0x3d634a1d));
4030      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4031      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4032                               getF32Constant(DAG, 0x3e75fe14));
4033      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4034      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4035                                getF32Constant(DAG, 0x3f317234));
4036      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4037      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4038                                getF32Constant(DAG, 0x3f800000));
4039      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
4040      SDValue TwoToFractionalPartOfX =
4041        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
4042
4043      result = DAG.getNode(ISD::BIT_CONVERT, dl,
4044                           MVT::f32, TwoToFractionalPartOfX);
4045
4046      if (DisableScheduling) {
4047        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4048        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4049        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4050        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4051        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4052        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
4053        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
4054        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
4055        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
4056        DAG.AssignOrdering(t11.getNode(), SDNodeOrder);
4057        DAG.AssignOrdering(t12.getNode(), SDNodeOrder);
4058        DAG.AssignOrdering(t13.getNode(), SDNodeOrder);
4059        DAG.AssignOrdering(t14.getNode(), SDNodeOrder);
4060        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4061        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4062      }
4063    }
4064  } else {
4065    // No special expansion.
4066    result = DAG.getNode(ISD::FEXP2, dl,
4067                         getValue(I.getOperand(1)).getValueType(),
4068                         getValue(I.getOperand(1)));
4069
4070    if (DisableScheduling)
4071      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4072  }
4073
4074  setValue(&I, result);
4075}
4076
4077/// visitPow - Lower a pow intrinsic. Handles the special sequences for
4078/// limited-precision mode when the base is 10.0f.
4079void
4080SelectionDAGBuilder::visitPow(CallInst &I) {
4081  SDValue result;
4082  Value *Val = I.getOperand(1);
4083  DebugLoc dl = getCurDebugLoc();
4084  bool IsExp10 = false;
4085
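  // Recognize pow(10.0f, x) in limited-precision mode so it can be expanded
  // below as 2^(x * log2(10)).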
4086  if (getValue(Val).getValueType() == MVT::f32 &&
4087      getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
4088      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4089    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
4090      if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
4091        APFloat Ten(10.0f);
4092        IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
4093      }
4094    }
4095  }
4096
4097  if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4098    SDValue Op = getValue(I.getOperand(2));
4099
4100    // Put the exponent in the right bit position for later addition to the
4101    // final result:
4102    //
4103    //   #define LOG2OF10 3.3219281f
4104    //   IntegerPartOfX = (int32_t)(x * LOG2OF10);
4105    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4106                             getF32Constant(DAG, 0x40549a78));
4107    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4108
4109    //   FractionalPartOfX = x - (float)IntegerPartOfX;
4110    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4111    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4112
4113    if (DisableScheduling) {
4114      DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
4115      DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
4116      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
4117      DAG.AssignOrdering(X.getNode(), SDNodeOrder);
4118    }
4119
4120    //   IntegerPartOfX <<= 23;
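    // (23 is the width of the f32 mantissa; shifting the integer part there
    // places it in the exponent field for the bit-level addition below.)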
4121    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4122                                 DAG.getConstant(23, TLI.getPointerTy()));
4123
4124    if (DisableScheduling)
4125      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
4126
4127    if (LimitFloatPrecision <= 6) {
4128      // For floating-point precision of 6:
4129      //
4130      //   twoToFractionalPartOfX =
4131      //     0.997535578f +
4132      //       (0.735607626f + 0.252464424f * x) * x;
4133      //
4134      // error 0.0144103317, which is 6 bits
4135      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4136                               getF32Constant(DAG, 0x3e814304));
4137      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4138                               getF32Constant(DAG, 0x3f3c50c8));
4139      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4140      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4141                               getF32Constant(DAG, 0x3f7f5e7e));
4142      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
4143      SDValue TwoToFractionalPartOfX =
4144        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
4145
4146      result = DAG.getNode(ISD::BIT_CONVERT, dl,
4147                           MVT::f32, TwoToFractionalPartOfX);
4148
4149      if (DisableScheduling) {
4150        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4151        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4152        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4153        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4154        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4155        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4156        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4157      }
4158    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
4159      // For floating-point precision of 12:
4160      //
4161      //   TwoToFractionalPartOfX =
4162      //     0.999892986f +
4163      //       (0.696457318f +
4164      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4165      //
4166      // error 0.000107046256, which is 13 to 14 bits
4167      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4168                               getF32Constant(DAG, 0x3da235e3));
4169      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4170                               getF32Constant(DAG, 0x3e65b8f3));
4171      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4172      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4173                               getF32Constant(DAG, 0x3f324b07));
4174      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4175      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4176                               getF32Constant(DAG, 0x3f7ff8fd));
4177      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
4178      SDValue TwoToFractionalPartOfX =
4179        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
4180
4181      result = DAG.getNode(ISD::BIT_CONVERT, dl,
4182                           MVT::f32, TwoToFractionalPartOfX);
4183
4184      if (DisableScheduling) {
4185        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4186        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4187        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4188        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4189        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4190        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
4191        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
4192        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4193        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4194      }
4195    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
4196      // For floating-point precision of 18:
4197      //
4198      //   TwoToFractionalPartOfX =
4199      //     0.999999982f +
4200      //       (0.693148872f +
4201      //         (0.240227044f +
4202      //           (0.554906021e-1f +
4203      //             (0.961591928e-2f +
4204      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4205      // error 2.47208000*10^(-7), which is better than 18 bits
4206      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4207                               getF32Constant(DAG, 0x3924b03e));
4208      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4209                               getF32Constant(DAG, 0x3ab24b87));
4210      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4211      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4212                               getF32Constant(DAG, 0x3c1d8c17));
4213      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4214      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4215                               getF32Constant(DAG, 0x3d634a1d));
4216      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4217      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4218                               getF32Constant(DAG, 0x3e75fe14));
4219      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4220      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4221                                getF32Constant(DAG, 0x3f317234));
4222      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4223      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4224                                getF32Constant(DAG, 0x3f800000));
4225      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
4226      SDValue TwoToFractionalPartOfX =
4227        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
4228
4229      result = DAG.getNode(ISD::BIT_CONVERT, dl,
4230                           MVT::f32, TwoToFractionalPartOfX);
4231
4232      if (DisableScheduling) {
4233        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4234        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4235        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4236        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4237        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4238        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
4239        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
4240        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
4241        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
4242        DAG.AssignOrdering(t11.getNode(), SDNodeOrder);
4243        DAG.AssignOrdering(t12.getNode(), SDNodeOrder);
4244        DAG.AssignOrdering(t13.getNode(), SDNodeOrder);
4245        DAG.AssignOrdering(t14.getNode(), SDNodeOrder);
4246        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4247        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4248      }
4249    }
4250  } else {
4251    // No special expansion.
4252    result = DAG.getNode(ISD::FPOW, dl,
4253                         getValue(I.getOperand(1)).getValueType(),
4254                         getValue(I.getOperand(1)),
4255                         getValue(I.getOperand(2)));
4256
4257    if (DisableScheduling)
4258      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4259  }
4260
4261  setValue(&I, result);
4262}
4263
4264/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
4265/// we want to emit this as a call to a named external function, return the
4266/// name; otherwise lower it and return null.
4267const char *
4268SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
4269  DebugLoc dl = getCurDebugLoc();
4270  SDValue Res;
4271
4272  switch (Intrinsic) {
4273  default:
4274    // By default, turn this into a target intrinsic node.
4275    visitTargetIntrinsic(I, Intrinsic);
4276    return 0;
4277  case Intrinsic::vastart:  visitVAStart(I); return 0;
4278  case Intrinsic::vaend:    visitVAEnd(I); return 0;
4279  case Intrinsic::vacopy:   visitVACopy(I); return 0;
4280  case Intrinsic::returnaddress:
4281    Res = DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
4282                      getValue(I.getOperand(1)));
4283    setValue(&I, Res);
4284    if (DisableScheduling)
4285      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4286    return 0;
4287  case Intrinsic::frameaddress:
4288    Res = DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
4289                      getValue(I.getOperand(1)));
4290    setValue(&I, Res);
4291    if (DisableScheduling)
4292      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4293    return 0;
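  // For setjmp/longjmp, the pointer arithmetic below skips the leading
  // underscore in the literal when the target does not use an
  // underscore-prefixed name.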
4294  case Intrinsic::setjmp:
4295    return "_setjmp"+!TLI.usesUnderscoreSetJmp();
4296  case Intrinsic::longjmp:
4297    return "_longjmp"+!TLI.usesUnderscoreLongJmp();
4298  case Intrinsic::memcpy: {
4299    SDValue Op1 = getValue(I.getOperand(1));
4300    SDValue Op2 = getValue(I.getOperand(2));
4301    SDValue Op3 = getValue(I.getOperand(3));
4302    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
4303    Res = DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
4304                        I.getOperand(1), 0, I.getOperand(2), 0);
4305    DAG.setRoot(Res);
4306    if (DisableScheduling)
4307      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4308    return 0;
4309  }
4310  case Intrinsic::memset: {
4311    SDValue Op1 = getValue(I.getOperand(1));
4312    SDValue Op2 = getValue(I.getOperand(2));
4313    SDValue Op3 = getValue(I.getOperand(3));
4314    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
4315    Res = DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
4316                        I.getOperand(1), 0);
4317    DAG.setRoot(Res);
4318    if (DisableScheduling)
4319      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4320    return 0;
4321  }
4322  case Intrinsic::memmove: {
4323    SDValue Op1 = getValue(I.getOperand(1));
4324    SDValue Op2 = getValue(I.getOperand(2));
4325    SDValue Op3 = getValue(I.getOperand(3));
4326    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
4327
4328    // If the source and destination are known to not be aliases, we can
4329    // lower memmove as memcpy.
4330    uint64_t Size = -1ULL;
4331    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
4332      Size = C->getZExtValue();
4333    if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
4334        AliasAnalysis::NoAlias) {
4335      Res = DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
4336                          I.getOperand(1), 0, I.getOperand(2), 0);
4337      DAG.setRoot(Res);
4338      if (DisableScheduling)
4339        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4340      return 0;
4341    }
4342
4343    Res = DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
4344                         I.getOperand(1), 0, I.getOperand(2), 0);
4345    DAG.setRoot(Res);
4346    if (DisableScheduling)
4347      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4348    return 0;
4349  }
4350  case Intrinsic::dbg_stoppoint:
4351  case Intrinsic::dbg_region_start:
4352  case Intrinsic::dbg_region_end:
4353  case Intrinsic::dbg_func_start:
4354    // FIXME - Remove these instructions once the dust settles.
4355    return 0;
4356  case Intrinsic::dbg_declare: {
4357    if (OptLevel != CodeGenOpt::None)
4358      // FIXME: Variable debug info is not supported here.
4359      return 0;
4360    DwarfWriter *DW = DAG.getDwarfWriter();
4361    if (!DW)
4362      return 0;
4363    DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
4364    if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None))
4365      return 0;
4366
4367    MDNode *Variable = DI.getVariable();
4368    Value *Address = DI.getAddress();
4369    if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
4370      Address = BCI->getOperand(0);
4371    AllocaInst *AI = dyn_cast<AllocaInst>(Address);
4372    // Don't handle byval struct arguments or VLAs, for example.
4373    if (!AI)
4374      return 0;
4375    DenseMap<const AllocaInst*, int>::iterator SI =
4376      FuncInfo.StaticAllocaMap.find(AI);
4377    if (SI == FuncInfo.StaticAllocaMap.end())
4378      return 0; // VLAs.
4379    int FI = SI->second;
4380
4381    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4382    if (MMI) {
4383      MetadataContext &TheMetadata =
4384        DI.getParent()->getContext().getMetadata();
4385      unsigned MDDbgKind = TheMetadata.getMDKind("dbg");
4386      MDNode *Dbg = TheMetadata.getMD(MDDbgKind, &DI);
4387      MMI->setVariableDbgInfo(Variable, FI, Dbg);
4388    }
4389    return 0;
4390  }
4391  case Intrinsic::eh_exception: {
4392    // Insert the EXCEPTIONADDR instruction.
4393    assert(CurMBB->isLandingPad() && "Call to eh.exception not in landing pad!");
4394    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
4395    SDValue Ops[1];
4396    Ops[0] = DAG.getRoot();
4397    SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
4398    setValue(&I, Op);
4399    DAG.setRoot(Op.getValue(1));
4400    if (DisableScheduling)
4401      DAG.AssignOrdering(Op.getNode(), SDNodeOrder);
4402    return 0;
4403  }
4404
4405  case Intrinsic::eh_selector: {
4406    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4407
4408    if (CurMBB->isLandingPad())
4409      AddCatchInfo(I, MMI, CurMBB);
4410    else {
4411#ifndef NDEBUG
4412      FuncInfo.CatchInfoLost.insert(&I);
4413#endif
4414      // FIXME: Mark exception selector register as live in.  Hack for PR1508.
4415      unsigned Reg = TLI.getExceptionSelectorRegister();
4416      if (Reg) CurMBB->addLiveIn(Reg);
4417    }
4418
4419    // Insert the EHSELECTION instruction.
4420    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
4421    SDValue Ops[2];
4422    Ops[0] = getValue(I.getOperand(1));
4423    Ops[1] = getRoot();
4424    SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
4425
4426    DAG.setRoot(Op.getValue(1));
4427
4428    Res = DAG.getSExtOrTrunc(Op, dl, MVT::i32);
4429    setValue(&I, Res);
4430    if (DisableScheduling) {
4431      DAG.AssignOrdering(Op.getNode(), SDNodeOrder);
4432      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4433    }
4434    return 0;
4435  }
4436
4437  case Intrinsic::eh_typeid_for: {
4438    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4439
4440    if (MMI) {
4441      // Find the type id for the given typeinfo.
4442      GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
4443      unsigned TypeID = MMI->getTypeIDFor(GV);
4444      Res = DAG.getConstant(TypeID, MVT::i32);
4445    } else {
4446      // Return something different from what eh_selector returns.
4447      Res = DAG.getConstant(1, MVT::i32);
4448    }
4449
4450    setValue(&I, Res);
4451    if (DisableScheduling)
4452      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4453    return 0;
4454  }
4455
4456  case Intrinsic::eh_return_i32:
4457  case Intrinsic::eh_return_i64:
4458    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4459      MMI->setCallsEHReturn(true);
4460      Res = DAG.getNode(ISD::EH_RETURN, dl,
4461                        MVT::Other,
4462                        getControlRoot(),
4463                        getValue(I.getOperand(1)),
4464                        getValue(I.getOperand(2)));
4465      DAG.setRoot(Res);
4466      if (DisableScheduling)
4467        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4468    } else {
4469      setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
4470    }
4471
4472    return 0;
4473  case Intrinsic::eh_unwind_init:
4474    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4475      MMI->setCallsUnwindInit(true);
4476    }
4477    return 0;
4478  case Intrinsic::eh_dwarf_cfa: {
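    // Compute the canonical frame address as the frame pointer plus the
    // target's frame-to-arguments offset, then add the requested offset.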
4479    EVT VT = getValue(I.getOperand(1)).getValueType();
4480    SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
4481                                        TLI.getPointerTy());
4482    SDValue Offset = DAG.getNode(ISD::ADD, dl,
4483                                 TLI.getPointerTy(),
4484                                 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
4485                                             TLI.getPointerTy()),
4486                                 CfaArg);
4487    SDValue FA = DAG.getNode(ISD::FRAMEADDR, dl,
4488                             TLI.getPointerTy(),
4489                             DAG.getConstant(0, TLI.getPointerTy()));
4490    Res = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
4491                      FA, Offset);
4492    setValue(&I, Res);
4493    if (DisableScheduling) {
4494      DAG.AssignOrdering(CfaArg.getNode(), SDNodeOrder);
4495      DAG.AssignOrdering(Offset.getNode(), SDNodeOrder);
4496      DAG.AssignOrdering(FA.getNode(), SDNodeOrder);
4497      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4498    }
4499    return 0;
4500  }
4501  case Intrinsic::convertff:
4502  case Intrinsic::convertfsi:
4503  case Intrinsic::convertfui:
4504  case Intrinsic::convertsif:
4505  case Intrinsic::convertuif:
4506  case Intrinsic::convertss:
4507  case Intrinsic::convertsu:
4508  case Intrinsic::convertus:
4509  case Intrinsic::convertuu: {
4510    ISD::CvtCode Code = ISD::CVT_INVALID;
4511    switch (Intrinsic) {
4512    case Intrinsic::convertff:  Code = ISD::CVT_FF; break;
4513    case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
4514    case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
4515    case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
4516    case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
4517    case Intrinsic::convertss:  Code = ISD::CVT_SS; break;
4518    case Intrinsic::convertsu:  Code = ISD::CVT_SU; break;
4519    case Intrinsic::convertus:  Code = ISD::CVT_US; break;
4520    case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
4521    }
4522    EVT DestVT = TLI.getValueType(I.getType());
4523    Value *Op1 = I.getOperand(1);
4524    Res = DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
4525                               DAG.getValueType(DestVT),
4526                               DAG.getValueType(getValue(Op1).getValueType()),
4527                               getValue(I.getOperand(2)),
4528                               getValue(I.getOperand(3)),
4529                               Code);
4530    setValue(&I, Res);
4531    if (DisableScheduling)
4532      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4533    return 0;
4534  }
4535  case Intrinsic::sqrt:
4536    Res = DAG.getNode(ISD::FSQRT, dl,
4537                      getValue(I.getOperand(1)).getValueType(),
4538                      getValue(I.getOperand(1)));
4539    setValue(&I, Res);
4540    if (DisableScheduling)
4541      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4542    return 0;
4543  case Intrinsic::powi:
4544    Res = DAG.getNode(ISD::FPOWI, dl,
4545                      getValue(I.getOperand(1)).getValueType(),
4546                      getValue(I.getOperand(1)),
4547                      getValue(I.getOperand(2)));
4548    setValue(&I, Res);
4549    if (DisableScheduling)
4550      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4551    return 0;
4552  case Intrinsic::sin:
4553    Res = DAG.getNode(ISD::FSIN, dl,
4554                      getValue(I.getOperand(1)).getValueType(),
4555                      getValue(I.getOperand(1)));
4556    setValue(&I, Res);
4557    if (DisableScheduling)
4558      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4559    return 0;
4560  case Intrinsic::cos:
4561    Res = DAG.getNode(ISD::FCOS, dl,
4562                      getValue(I.getOperand(1)).getValueType(),
4563                      getValue(I.getOperand(1)));
4564    setValue(&I, Res);
4565    if (DisableScheduling)
4566      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4567    return 0;
4568  case Intrinsic::log:
4569    visitLog(I);
4570    return 0;
4571  case Intrinsic::log2:
4572    visitLog2(I);
4573    return 0;
4574  case Intrinsic::log10:
4575    visitLog10(I);
4576    return 0;
4577  case Intrinsic::exp:
4578    visitExp(I);
4579    return 0;
4580  case Intrinsic::exp2:
4581    visitExp2(I);
4582    return 0;
4583  case Intrinsic::pow:
4584    visitPow(I);
4585    return 0;
4586  case Intrinsic::pcmarker: {
4587    SDValue Tmp = getValue(I.getOperand(1));
4588    Res = DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp);
4589    DAG.setRoot(Res);
4590    if (DisableScheduling)
4591      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4592    return 0;
4593  }
4594  case Intrinsic::readcyclecounter: {
4595    SDValue Op = getRoot();
4596    Res = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4597                      DAG.getVTList(MVT::i64, MVT::Other),
4598                      &Op, 1);
4599    setValue(&I, Res);
4600    DAG.setRoot(Res.getValue(1));
4601    if (DisableScheduling)
4602      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4603    return 0;
4604  }
4605  case Intrinsic::bswap:
4606    Res = DAG.getNode(ISD::BSWAP, dl,
4607                      getValue(I.getOperand(1)).getValueType(),
4608                      getValue(I.getOperand(1)));
4609    setValue(&I, Res);
4610    if (DisableScheduling)
4611      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4612    return 0;
4613  case Intrinsic::cttz: {
4614    SDValue Arg = getValue(I.getOperand(1));
4615    EVT Ty = Arg.getValueType();
4616    Res = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4617    setValue(&I, Res);
4618    if (DisableScheduling)
4619      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4620    return 0;
4621  }
4622  case Intrinsic::ctlz: {
4623    SDValue Arg = getValue(I.getOperand(1));
4624    EVT Ty = Arg.getValueType();
4625    Res = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4626    setValue(&I, Res);
4627    if (DisableScheduling)
4628      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4629    return 0;
4630  }
4631  case Intrinsic::ctpop: {
4632    SDValue Arg = getValue(I.getOperand(1));
4633    EVT Ty = Arg.getValueType();
4634    Res = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4635    setValue(&I, Res);
4636    if (DisableScheduling)
4637      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4638    return 0;
4639  }
4640  case Intrinsic::stacksave: {
4641    SDValue Op = getRoot();
4642    Res = DAG.getNode(ISD::STACKSAVE, dl,
4643                      DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4644    setValue(&I, Res);
4645    DAG.setRoot(Res.getValue(1));
4646    if (DisableScheduling)
4647      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4648    return 0;
4649  }
4650  case Intrinsic::stackrestore: {
4651    Res = getValue(I.getOperand(1));
4652    Res = DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Res);
4653    DAG.setRoot(Res);
4654    if (DisableScheduling)
4655      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4656    return 0;
4657  }
4658  case Intrinsic::stackprotector: {
4659    // Emit code into the DAG to store the stack guard onto the stack.
4660    MachineFunction &MF = DAG.getMachineFunction();
4661    MachineFrameInfo *MFI = MF.getFrameInfo();
4662    EVT PtrTy = TLI.getPointerTy();
4663
4664    SDValue Src = getValue(I.getOperand(1));   // The guard's value.
4665    AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4666
4667    int FI = FuncInfo.StaticAllocaMap[Slot];
4668    MFI->setStackProtectorIndex(FI);
4669
4670    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4671
4672    // Store the stack protector onto the stack.
4673    Res = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4674                       PseudoSourceValue::getFixedStack(FI),
4675                       0, true);
4676    setValue(&I, Res);
4677    DAG.setRoot(Res);
4678    if (DisableScheduling)
4679      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4680    return 0;
4681  }
4682  case Intrinsic::objectsize: {
4683    // If we don't know by now, we're never going to know.
4684    ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
4685
4686    assert(CI && "Non-constant type in __builtin_object_size?");
4687
4688    SDValue Arg = getValue(I.getOperand(0));
4689    EVT Ty = Arg.getValueType();
4690
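    // The size cannot be determined, so fold to the intrinsic's "don't know"
    // value: -1 when the second argument is 0, otherwise 0.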
4691    if (CI->getZExtValue() == 0)
4692      Res = DAG.getConstant(-1ULL, Ty);
4693    else
4694      Res = DAG.getConstant(0, Ty);
4695
4696    setValue(&I, Res);
4697    if (DisableScheduling)
4698      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4699    return 0;
4700  }
4701  case Intrinsic::var_annotation:
4702    // Discard annotate attributes
4703    return 0;
4704
4705  case Intrinsic::init_trampoline: {
4706    const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4707
4708    SDValue Ops[6];
4709    Ops[0] = getRoot();
4710    Ops[1] = getValue(I.getOperand(1));
4711    Ops[2] = getValue(I.getOperand(2));
4712    Ops[3] = getValue(I.getOperand(3));
4713    Ops[4] = DAG.getSrcValue(I.getOperand(1));
4714    Ops[5] = DAG.getSrcValue(F);
4715
4716    Res = DAG.getNode(ISD::TRAMPOLINE, dl,
4717                      DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4718                      Ops, 6);
4719
4720    setValue(&I, Res);
4721    DAG.setRoot(Res.getValue(1));
4722    if (DisableScheduling)
4723      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4724    return 0;
4725  }
4726  case Intrinsic::gcroot:
4727    if (GFI) {
4728      Value *Alloca = I.getOperand(1);
4729      Constant *TypeMap = cast<Constant>(I.getOperand(2));
4730
4731      FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4732      GFI->addStackRoot(FI->getIndex(), TypeMap);
4733    }
4734    return 0;
4735  case Intrinsic::gcread:
4736  case Intrinsic::gcwrite:
4737    llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
4738    return 0;
4739  case Intrinsic::flt_rounds:
4740    Res = DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32);
4741    setValue(&I, Res);
4742    if (DisableScheduling)
4743      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4744    return 0;
4745  case Intrinsic::trap:
4746    Res = DAG.getNode(ISD::TRAP, dl, MVT::Other, getRoot());
4747    DAG.setRoot(Res);
4748    if (DisableScheduling)
4749      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4750    return 0;
4751  case Intrinsic::uadd_with_overflow:
4752    return implVisitAluOverflow(I, ISD::UADDO);
4753  case Intrinsic::sadd_with_overflow:
4754    return implVisitAluOverflow(I, ISD::SADDO);
4755  case Intrinsic::usub_with_overflow:
4756    return implVisitAluOverflow(I, ISD::USUBO);
4757  case Intrinsic::ssub_with_overflow:
4758    return implVisitAluOverflow(I, ISD::SSUBO);
4759  case Intrinsic::umul_with_overflow:
4760    return implVisitAluOverflow(I, ISD::UMULO);
4761  case Intrinsic::smul_with_overflow:
4762    return implVisitAluOverflow(I, ISD::SMULO);
4763
4764  case Intrinsic::prefetch: {
4765    SDValue Ops[4];
4766    Ops[0] = getRoot();
4767    Ops[1] = getValue(I.getOperand(1));
4768    Ops[2] = getValue(I.getOperand(2));
4769    Ops[3] = getValue(I.getOperand(3));
4770    Res = DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4);
4771    DAG.setRoot(Res);
4772    if (DisableScheduling)
4773      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4774    return 0;
4775  }
4776
4777  case Intrinsic::memory_barrier: {
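    // Pass the chain plus the intrinsic's five i1 ordering flags straight
    // through to the MEMBARRIER node.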
4778    SDValue Ops[6];
4779    Ops[0] = getRoot();
4780    for (int x = 1; x < 6; ++x)
4781      Ops[x] = getValue(I.getOperand(x));
4782
4783    Res = DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6);
4784    DAG.setRoot(Res);
4785    if (DisableScheduling)
4786      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4787    return 0;
4788  }
4789  case Intrinsic::atomic_cmp_swap: {
4790    SDValue Root = getRoot();
4791    SDValue L =
4792      DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4793                    getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4794                    Root,
4795                    getValue(I.getOperand(1)),
4796                    getValue(I.getOperand(2)),
4797                    getValue(I.getOperand(3)),
4798                    I.getOperand(1));
4799    setValue(&I, L);
4800    DAG.setRoot(L.getValue(1));
4801    if (DisableScheduling)
4802      DAG.AssignOrdering(L.getNode(), SDNodeOrder);
4803    return 0;
4804  }
4805  case Intrinsic::atomic_load_add:
4806    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4807  case Intrinsic::atomic_load_sub:
4808    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4809  case Intrinsic::atomic_load_or:
4810    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4811  case Intrinsic::atomic_load_xor:
4812    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4813  case Intrinsic::atomic_load_and:
4814    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4815  case Intrinsic::atomic_load_nand:
4816    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4817  case Intrinsic::atomic_load_max:
4818    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4819  case Intrinsic::atomic_load_min:
4820    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4821  case Intrinsic::atomic_load_umin:
4822    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4823  case Intrinsic::atomic_load_umax:
4824    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4825  case Intrinsic::atomic_swap:
4826    return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4827
4828  case Intrinsic::invariant_start:
4829  case Intrinsic::lifetime_start:
4830    // Discard region information.
4831    Res = DAG.getUNDEF(TLI.getPointerTy());
4832    setValue(&I, Res);
4833    if (DisableScheduling)
4834      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4835    return 0;
4836  case Intrinsic::invariant_end:
4837  case Intrinsic::lifetime_end:
4838    // Discard region information.
4839    return 0;
4840  }
4841}
4842
4843/// Test if the given instruction is in a position to be optimized
4844/// with a tail-call. This roughly means that it's in a block with
4845/// a return and there's nothing that needs to be scheduled
4846/// between it and the return.
4847///
4848/// This function only tests target-independent requirements.
4849/// For target-dependent requirements, a target should override
4850/// TargetLowering::IsEligibleForTailCallOptimization.
4851///
4852static bool
4853isInTailCallPosition(const Instruction *I, Attributes CalleeRetAttr,
4854                     const TargetLowering &TLI) {
4855  const BasicBlock *ExitBB = I->getParent();
4856  const TerminatorInst *Term = ExitBB->getTerminator();
4857  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
4858  const Function *F = ExitBB->getParent();
4859
4860  // The block must end in a return statement or an unreachable.
4861  if (!Ret && !isa<UnreachableInst>(Term)) return false;
4862
4863  // If I will have a chain, make sure no other instruction that will have a
4864  // chain interposes between I and the return.
4865  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
4866      !I->isSafeToSpeculativelyExecute())
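    // Scan backwards from the instruction just before the terminator; any
    // intervening instruction that may touch memory or trap blocks the tail
    // call.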
4867    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
4868         --BBI) {
4869      if (&*BBI == I)
4870        break;
4871      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
4872          !BBI->isSafeToSpeculativelyExecute())
4873        return false;
4874    }
4875
4876  // If the block ends with a void return or unreachable, it doesn't matter
4877  // what the call's return type is.
4878  if (!Ret || Ret->getNumOperands() == 0) return true;
4879
4880  // If the return value is undef, it doesn't matter what the call's
4881  // return type is.
4882  if (isa<UndefValue>(Ret->getOperand(0))) return true;
4883
4884  // Conservatively require the attributes of the call to match those of
4885  // the return. Ignore noalias because it doesn't affect the call sequence.
4886  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
4887  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
4888    return false;
4889
4890  // Otherwise, make sure the unmodified return value of I is the return value.
4891  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
4892       U = dyn_cast<Instruction>(U->getOperand(0))) {
4893    if (!U)
4894      return false;
4895    if (!U->hasOneUse())
4896      return false;
4897    if (U == I)
4898      break;
4899    // Check for a truly no-op truncate.
4900    if (isa<TruncInst>(U) &&
4901        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
4902      continue;
4903    // Check for a truly no-op bitcast.
4904    if (isa<BitCastInst>(U) &&
4905        (U->getOperand(0)->getType() == U->getType() ||
4906         (isa<PointerType>(U->getOperand(0)->getType()) &&
4907          isa<PointerType>(U->getType()))))
4908      continue;
4909    // Otherwise it's not a true no-op.
4910    return false;
4911  }
4912
4913  return true;
4914}
4915
4916void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
4917                                      bool isTailCall,
4918                                      MachineBasicBlock *LandingPad) {
4919  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4920  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4921  const Type *RetTy = FTy->getReturnType();
4922  MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4923  unsigned BeginLabel = 0, EndLabel = 0;
4924
4925  TargetLowering::ArgListTy Args;
4926  TargetLowering::ArgListEntry Entry;
4927  Args.reserve(CS.arg_size());
4928
4929  // Check whether the function can return without sret-demotion.
4930  SmallVector<EVT, 4> OutVTs;
4931  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
4932  SmallVector<uint64_t, 4> Offsets;
4933  getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
4934                OutVTs, OutsFlags, TLI, &Offsets);
4935
4936  bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
4937                        FTy->isVarArg(), OutVTs, OutsFlags, DAG);
4938
4939  SDValue DemoteStackSlot;
4940
4941  if (!CanLowerReturn) {
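    // Demote the return value: allocate a stack slot, pass its address as a
    // hidden sret argument, and lower the call as returning void; the result
    // is loaded back from the slot after the call.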
4942    uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(
4943                      FTy->getReturnType());
4944    unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(
4945                      FTy->getReturnType());
4946    MachineFunction &MF = DAG.getMachineFunction();
4947    int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
4948    const Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
4949
4950    DemoteStackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
4951    Entry.Node = DemoteStackSlot;
4952    Entry.Ty = StackSlotPtrType;
4953    Entry.isSExt = false;
4954    Entry.isZExt = false;
4955    Entry.isInReg = false;
4956    Entry.isSRet = true;
4957    Entry.isNest = false;
4958    Entry.isByVal = false;
4959    Entry.Alignment = Align;
4960    Args.push_back(Entry);
4961    RetTy = Type::getVoidTy(FTy->getContext());
4962  }
4963
4964  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4965       i != e; ++i) {
4966    SDValue ArgNode = getValue(*i);
4967    Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4968
4969    unsigned attrInd = i - CS.arg_begin() + 1;
4970    Entry.isSExt  = CS.paramHasAttr(attrInd, Attribute::SExt);
4971    Entry.isZExt  = CS.paramHasAttr(attrInd, Attribute::ZExt);
4972    Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4973    Entry.isSRet  = CS.paramHasAttr(attrInd, Attribute::StructRet);
4974    Entry.isNest  = CS.paramHasAttr(attrInd, Attribute::Nest);
4975    Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4976    Entry.Alignment = CS.getParamAlignment(attrInd);
4977    Args.push_back(Entry);
4978  }
4979
4980  if (LandingPad && MMI) {
4981    // Insert a label before the invoke call to mark the try range.  This can be
4982    // used to detect deletion of the invoke via the MachineModuleInfo.
4983    BeginLabel = MMI->NextLabelID();
4984
4985    // Both PendingLoads and PendingExports must be flushed here;
4986    // this call might not return.
4987    (void)getRoot();
4988    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4989                             getControlRoot(), BeginLabel));
4990  }
4991
4992  // Check if target-independent constraints permit a tail call here.
4993  // Target-dependent constraints are checked within TLI.LowerCallTo.
4994  if (isTailCall &&
4995      !isInTailCallPosition(CS.getInstruction(),
4996                            CS.getAttributes().getRetAttributes(),
4997                            TLI))
4998    isTailCall = false;
4999
5000  std::pair<SDValue,SDValue> Result =
5001    TLI.LowerCallTo(getRoot(), RetTy,
5002                    CS.paramHasAttr(0, Attribute::SExt),
5003                    CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
5004                    CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
5005                    CS.getCallingConv(),
5006                    isTailCall,
5007                    !CS.getInstruction()->use_empty(),
5008                    Callee, Args, DAG, getCurDebugLoc(), SDNodeOrder);
5009  assert((isTailCall || Result.second.getNode()) &&
5010         "Non-null chain expected with non-tail call!");
5011  assert((Result.second.getNode() || !Result.first.getNode()) &&
5012         "Null value expected with tail call!");
5013  if (Result.first.getNode()) {
5014    setValue(CS.getInstruction(), Result.first);
5015    if (DisableScheduling)
5016      DAG.AssignOrdering(Result.first.getNode(), SDNodeOrder);
5017  } else if (!CanLowerReturn && Result.second.getNode()) {
5018    // The instruction result is the result of loading from the
5019    // hidden sret parameter.
5020    SmallVector<EVT, 1> PVTs;
5021    const Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
5022
5023    ComputeValueVTs(TLI, PtrRetTy, PVTs);
5024    assert(PVTs.size() == 1 && "Pointers should fit in one register");
5025    EVT PtrVT = PVTs[0];
5026    unsigned NumValues = OutVTs.size();
5027    SmallVector<SDValue, 4> Values(NumValues);
5028    SmallVector<SDValue, 4> Chains(NumValues);
5029
5030    for (unsigned i = 0; i < NumValues; ++i) {
5031      SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
5032                                DemoteStackSlot,
5033                                DAG.getConstant(Offsets[i], PtrVT));
5034      SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
5035                              Add, NULL, Offsets[i], false, 1);
5036      Values[i] = L;
5037      Chains[i] = L.getValue(1);
5038    }
5039
5040    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
5041                                MVT::Other, &Chains[0], NumValues);
5042    PendingLoads.push_back(Chain);
5043
5044    SDValue MV = DAG.getNode(ISD::MERGE_VALUES,
5045                             getCurDebugLoc(),
5046                             DAG.getVTList(&OutVTs[0], NumValues),
5047                             &Values[0], NumValues);
5048    setValue(CS.getInstruction(), MV);
5049
5050    if (DisableScheduling) {
5051      DAG.AssignOrdering(Chain.getNode(), SDNodeOrder);
5052      DAG.AssignOrdering(MV.getNode(), SDNodeOrder);
5053    }
5054  }
5055
5056  // As a special case, a null chain means that a tail call has been emitted and
5057  // the DAG root is already updated.
5058  if (Result.second.getNode()) {
5059    DAG.setRoot(Result.second);
5060    if (DisableScheduling)
5061      DAG.AssignOrdering(Result.second.getNode(), SDNodeOrder);
5062  } else {
5063    HasTailCall = true;
5064  }
5065
5066  if (LandingPad && MMI) {
5067    // Insert a label at the end of the invoke call to mark the try range.  This
5068    // can be used to detect deletion of the invoke via the MachineModuleInfo.
5069    EndLabel = MMI->NextLabelID();
5070    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
5071                             getRoot(), EndLabel));
5072
5073    // Inform MachineModuleInfo of range.
5074    MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
5075  }
5076}
5077
5078void SelectionDAGBuilder::visitCall(CallInst &I) {
5079  const char *RenameFn = 0;
5080  if (Function *F = I.getCalledFunction()) {
5081    if (F->isDeclaration()) {
5082      const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
5083      if (II) {
5084        if (unsigned IID = II->getIntrinsicID(F)) {
5085          RenameFn = visitIntrinsicCall(I, IID);
5086          if (!RenameFn)
5087            return;
5088        }
5089      }
5090      if (unsigned IID = F->getIntrinsicID()) {
5091        RenameFn = visitIntrinsicCall(I, IID);
5092        if (!RenameFn)
5093          return;
5094      }
5095    }
5096
5097    // Check for well-known libc/libm calls.  If the function is internal, it
5098    // can't be a library call.
5099    if (!F->hasLocalLinkage() && F->hasName()) {
5100      StringRef Name = F->getName();
5101      if (Name == "copysign" || Name == "copysignf") {
5102        if (I.getNumOperands() == 3 &&   // Basic sanity checks.
5103            I.getOperand(1)->getType()->isFloatingPoint() &&
5104            I.getType() == I.getOperand(1)->getType() &&
5105            I.getType() == I.getOperand(2)->getType()) {
5106          SDValue LHS = getValue(I.getOperand(1));
5107          SDValue RHS = getValue(I.getOperand(2));
5108          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
5109                                   LHS.getValueType(), LHS, RHS));
5110          return;
5111        }
5112      } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
5113        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
5114            I.getOperand(1)->getType()->isFloatingPoint() &&
5115            I.getType() == I.getOperand(1)->getType()) {
5116          SDValue Tmp = getValue(I.getOperand(1));
5117          setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
5118                                   Tmp.getValueType(), Tmp));
5119          return;
5120        }
5121      } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
5122        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
5123            I.getOperand(1)->getType()->isFloatingPoint() &&
5124            I.getType() == I.getOperand(1)->getType() &&
5125            I.onlyReadsMemory()) {
5126          SDValue Tmp = getValue(I.getOperand(1));
5127          setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
5128                                   Tmp.getValueType(), Tmp));
5129          return;
5130        }
5131      } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
5132        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
5133            I.getOperand(1)->getType()->isFloatingPoint() &&
5134            I.getType() == I.getOperand(1)->getType() &&
5135            I.onlyReadsMemory()) {
5136          SDValue Tmp = getValue(I.getOperand(1));
5137          setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
5138                                   Tmp.getValueType(), Tmp));
5139          return;
5140        }
5141      } else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
5142        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
5143            I.getOperand(1)->getType()->isFloatingPoint() &&
5144            I.getType() == I.getOperand(1)->getType() &&
5145            I.onlyReadsMemory()) {
5146          SDValue Tmp = getValue(I.getOperand(1));
5147          setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
5148                                   Tmp.getValueType(), Tmp));
5149          return;
5150        }
5151      }
5152    }
5153  } else if (isa<InlineAsm>(I.getOperand(0))) {
5154    visitInlineAsm(&I);
5155    return;
5156  }
5157
5158  SDValue Callee;
5159  if (!RenameFn)
5160    Callee = getValue(I.getOperand(0));
5161  else
5162    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
5163
5164  // Check if we can potentially perform a tail call. More detailed checking
5165  // is done within LowerCallTo, after more information about the call is known.
5166  bool isTailCall = PerformTailCallOpt && I.isTailCall();
5167
5168  LowerCallTo(&I, Callee, isTailCall);
5169}
5170
5171/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
5172/// this value and returns the result as a ValueVT value.  This uses
5173/// Chain/Flag as the input and updates them for the output Chain/Flag.
5174/// If the Flag pointer is NULL, no flag is used.
5175SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
5176                                      unsigned Order, SDValue &Chain,
5177                                      SDValue *Flag) const {
5178  // Assemble the legal parts into the final values.
5179  SmallVector<SDValue, 4> Values(ValueVTs.size());
5180  SmallVector<SDValue, 8> Parts;
5181  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
5182    // Copy the legal parts from the registers.
5183    EVT ValueVT = ValueVTs[Value];
5184    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
5185    EVT RegisterVT = RegVTs[Value];
5186
5187    Parts.resize(NumRegs);
5188    for (unsigned i = 0; i != NumRegs; ++i) {
5189      SDValue P;
5190      if (Flag == 0) {
5191        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
5192      } else {
5193        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
5194        *Flag = P.getValue(2);
5195      }
5196
5197      Chain = P.getValue(1);
5198
5199      if (DisableScheduling)
5200        DAG.AssignOrdering(P.getNode(), Order);
5201
5202      // If the source register was virtual and if we know something about it,
5203      // add an assert node.
5204      if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
5205          RegisterVT.isInteger() && !RegisterVT.isVector()) {
5206        unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
5207        FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
5208        if (FLI.LiveOutRegInfo.size() > SlotNo) {
5209          FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
5210
5211          unsigned RegSize = RegisterVT.getSizeInBits();
5212          unsigned NumSignBits = LOI.NumSignBits;
5213          unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
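          // (Leading ones in KnownZero correspond to high bits of the
          // register's value that are known to be zero.)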
5214
5215          // FIXME: We capture more information than the dag can represent.  For
5216          // now, just use the tightest assertzext/assertsext possible.
5217          bool isSExt = true;
5218          EVT FromVT(MVT::Other);
5219          if (NumSignBits == RegSize)
5220            isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
5221          else if (NumZeroBits >= RegSize-1)
5222            isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
5223          else if (NumSignBits > RegSize-8)
5224            isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
5225          else if (NumZeroBits >= RegSize-8)
5226            isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
5227          else if (NumSignBits > RegSize-16)
5228            isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
5229          else if (NumZeroBits >= RegSize-16)
5230            isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
5231          else if (NumSignBits > RegSize-32)
5232            isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
5233          else if (NumZeroBits >= RegSize-32)
5234            isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
5235
5236          if (FromVT != MVT::Other) {
5237            P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
5238                            RegisterVT, P, DAG.getValueType(FromVT));
5239
5240            if (DisableScheduling)
5241              DAG.AssignOrdering(P.getNode(), Order);
5242          }
5243        }
5244      }
5245
5246      Parts[i] = P;
5247    }
5248
5249    Values[Value] = getCopyFromParts(DAG, dl, Order, Parts.begin(),
5250                                     NumRegs, RegisterVT, ValueVT);
5251    if (DisableScheduling)
5252      DAG.AssignOrdering(Values[Value].getNode(), Order);
5253    Part += NumRegs;
5254    Parts.clear();
5255  }
5256
5257  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5258                            DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
5259                            &Values[0], ValueVTs.size());
5260  if (DisableScheduling)
5261    DAG.AssignOrdering(Res.getNode(), Order);
5262  return Res;
5263}
5264
5265/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
5266/// specified value into the registers specified by this object.  This uses
5267/// Chain/Flag as the input and updates them for the output Chain/Flag.
5268/// If the Flag pointer is NULL, no flag is used.
5269void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
5270                                 unsigned Order, SDValue &Chain,
5271                                 SDValue *Flag) const {
5272  // Get the list of the value's legal parts.
5273  unsigned NumRegs = Regs.size();
5274  SmallVector<SDValue, 8> Parts(NumRegs);
5275  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
5276    EVT ValueVT = ValueVTs[Value];
5277    unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
5278    EVT RegisterVT = RegVTs[Value];
5279
5280    getCopyToParts(DAG, dl, Order,
5281                   Val.getValue(Val.getResNo() + Value),
5282                   &Parts[Part], NumParts, RegisterVT);
5283    Part += NumParts;
5284  }
5285
5286  // Copy the parts into the registers.
5287  SmallVector<SDValue, 8> Chains(NumRegs);
5288  for (unsigned i = 0; i != NumRegs; ++i) {
5289    SDValue Part;
5290    if (Flag == 0) {
5291      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
5292    } else {
5293      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
5294      *Flag = Part.getValue(1);
5295    }
5296
5297    Chains[i] = Part.getValue(0);
5298
5299    if (DisableScheduling)
5300      DAG.AssignOrdering(Part.getNode(), Order);
5301  }
5302
5303  if (NumRegs == 1 || Flag)
5304    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
5305    // flagged to it. That is the CopyToReg nodes and the user are considered
5306    // a single scheduling unit. If we create a TokenFactor and return it as
5307    // chain, then the TokenFactor is both a predecessor (operand) of the
5308    // user as well as a successor (the TF operands are flagged to the user).
5309    // c1, f1 = CopyToReg
5310    // c2, f2 = CopyToReg
5311    // c3     = TokenFactor c1, c2
5312    // ...
5313    //        = op c3, ..., f2
5314    Chain = Chains[NumRegs-1];
5315  else
5316    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
5317
5318  if (DisableScheduling)
5319    DAG.AssignOrdering(Chain.getNode(), Order);
5320}
5321
5322/// AddInlineAsmOperands - Add this value to the specified inlineasm node
5323/// operand list.  This adds the code marker and includes the number of
5324/// values added into it.
5325void RegsForValue::AddInlineAsmOperands(unsigned Code,
5326                                        bool HasMatching,unsigned MatchingIdx,
5327                                        SelectionDAG &DAG, unsigned Order,
5328                                        std::vector<SDValue> &Ops) const {
5329  EVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
5330  assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
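  // Build the operand descriptor word: the low 3 bits hold the operand code,
  // bits 3-15 the register count, and a matching constraint sets bit 31 and
  // places the matched operand index at bit 16.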
5331  unsigned Flag = Code | (Regs.size() << 3);
5332  if (HasMatching)
5333    Flag |= 0x80000000 | (MatchingIdx << 16);
5334
5335  SDValue Res = DAG.getTargetConstant(Flag, IntPtrTy);
5336  Ops.push_back(Res);
5337
5338  if (DisableScheduling)
5339    DAG.AssignOrdering(Res.getNode(), Order);
5340
5341  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
5342    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
5343    EVT RegisterVT = RegVTs[Value];
5344    for (unsigned i = 0; i != NumRegs; ++i) {
5345      assert(Reg < Regs.size() && "Mismatch in # registers expected");
5346      SDValue Res = DAG.getRegister(Regs[Reg++], RegisterVT);
5347      Ops.push_back(Res);
5348
5349      if (DisableScheduling)
5350        DAG.AssignOrdering(Res.getNode(), Order);
5351    }
5352  }
5353}
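// Layout of the flag word built above, as implied by this code (a sketch, not
// an authoritative spec; see InlineAsm.h for the definitive encoding):
//
//   bits  0-2   operand code (e.g. 1 = REGUSE, 2 = REGDEF, 3 = IMM, 4 = MEM,
//               6 = EARLYCLOBBER REGDEF)
//   bits  3-15  number of operands that follow (hence the (1 << 13) assert)
//   bits 16-30  index of the matching operand, when HasMatching is set
//   bit  31     set when HasMatching is true
//
// For example, a REGDEF of two registers tied to operand 3 would be encoded
// as 0x80030012 == 0x80000000 | (3 << 16) | (2 << 3) | 2.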
5354
5355/// isAllocatableRegister - If the specified register is safe to allocate,
5356/// i.e. it isn't a stack pointer or some other special register, return the
5357/// register class for the register.  Otherwise, return null.
5358static const TargetRegisterClass *
5359isAllocatableRegister(unsigned Reg, MachineFunction &MF,
5360                      const TargetLowering &TLI,
5361                      const TargetRegisterInfo *TRI) {
5362  EVT FoundVT = MVT::Other;
5363  const TargetRegisterClass *FoundRC = 0;
5364  for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
5365       E = TRI->regclass_end(); RCI != E; ++RCI) {
5366    EVT ThisVT = MVT::Other;
5367
5368    const TargetRegisterClass *RC = *RCI;
5369    // If none of the value types for this register class are valid, we
5370    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
5371    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
5372         I != E; ++I) {
5373      if (TLI.isTypeLegal(*I)) {
5374        // If we have already found this register in a different register class,
5375        // choose the one with the largest VT specified.  For example, on
5376        // PowerPC, we favor f64 register classes over f32.
5377        if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
5378          ThisVT = *I;
5379          break;
5380        }
5381      }
5382    }
5383
5384    if (ThisVT == MVT::Other) continue;
5385
5386    // NOTE: This isn't ideal.  In particular, this might allocate the frame
5387    // pointer in functions that need it (such registers are only removed from
5388    // the allocation order once a variable-sized allocation has been seen).
5389    // This is a slight code pessimization, but should still work.
5390    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
5391         E = RC->allocation_order_end(MF); I != E; ++I)
5392      if (*I == Reg) {
5393        // We found a matching register class.  Keep looking at others in case
5394        // we find one with larger registers that this physreg is also in.
5395        FoundRC = RC;
5396        FoundVT = ThisVT;
5397        break;
5398      }
5399  }
5400  return FoundRC;
5401}
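// For example (illustrative): per the comment above, asking about the stack
// pointer on a typical 32-bit x86 target would return null here, since it does
// not appear in any register class's allocation order, while a general-purpose
// register such as EAX would return the largest legal class containing it.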
5402
5403
5404namespace llvm {
5405/// AsmOperandInfo - This contains information for each constraint that we are
5406/// lowering.
5407class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
5408    public TargetLowering::AsmOperandInfo {
5409public:
5410  /// CallOperand - If this is the result output operand or a clobber,
5411  /// this is null; otherwise it is the incoming operand to the CallInst.
5412  /// This gets modified as the asm is processed.
5413  SDValue CallOperand;
5414
5415  /// AssignedRegs - If this is a register or register class operand, this
5416  /// contains the set of registers corresponding to the operand.
5417  RegsForValue AssignedRegs;
5418
5419  explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
5420    : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
5421  }
5422
5423  /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
5424  /// busy in OutputRegs/InputRegs.
5425  void MarkAllocatedRegs(bool isOutReg, bool isInReg,
5426                         std::set<unsigned> &OutputRegs,
5427                         std::set<unsigned> &InputRegs,
5428                         const TargetRegisterInfo &TRI) const {
5429    if (isOutReg) {
5430      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
5431        MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
5432    }
5433    if (isInReg) {
5434      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
5435        MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
5436    }
5437  }
5438
5439  /// getCallOperandValEVT - Return the EVT of the Value* that this operand
5440  /// corresponds to.  If there is no Value* for this operand, it returns
5441  /// MVT::Other.
5442  EVT getCallOperandValEVT(LLVMContext &Context,
5443                           const TargetLowering &TLI,
5444                           const TargetData *TD) const {
5445    if (CallOperandVal == 0) return MVT::Other;
5446
5447    if (isa<BasicBlock>(CallOperandVal))
5448      return TLI.getPointerTy();
5449
5450    const llvm::Type *OpTy = CallOperandVal->getType();
5451
5452    // If this is an indirect operand, the operand is a pointer to the
5453    // accessed type.
5454    if (isIndirect) {
5455      const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
5456      if (!PtrTy)
5457        llvm_report_error("Indirect operand for inline asm not a pointer!");
5458      OpTy = PtrTy->getElementType();
5459    }
5460
5461    // If OpTy is not a single value, it may be a struct/union that we
5462    // can tile with integers.
5463    if (!OpTy->isSingleValueType() && OpTy->isSized()) {
5464      unsigned BitSize = TD->getTypeSizeInBits(OpTy);
5465      switch (BitSize) {
5466      default: break;
5467      case 1:
5468      case 8:
5469      case 16:
5470      case 32:
5471      case 64:
5472      case 128:
5473        OpTy = IntegerType::get(Context, BitSize);
5474        break;
5475      }
5476    }
5477
5478    return TLI.getValueType(OpTy, true);
5479  }
5480
5481private:
5482  /// MarkRegAndAliases - Mark the specified register and all aliases in the
5483  /// specified set.
5484  static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
5485                                const TargetRegisterInfo &TRI) {
5486    assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
5487    Regs.insert(Reg);
5488    if (const unsigned *Aliases = TRI.getAliasSet(Reg))
5489      for (; *Aliases; ++Aliases)
5490        Regs.insert(*Aliases);
5491  }
5492};
5493} // end llvm namespace.
5494
5495
5496/// GetRegistersForValue - Assign registers (virtual or physical) for the
5497/// specified operand.  We prefer to assign virtual registers, to allow the
5498/// register allocator to handle the assignment process.  However, if the asm
5499/// uses features that we can't model on machineinstrs, we have SDISel do the
5500/// allocation.  This produces generally horrible, but correct, code.
5501///
5502///   OpInfo describes the operand.
5503///   InputRegs and OutputRegs are the sets of already allocated physical registers.
5504///
5505void SelectionDAGBuilder::
5506GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
5507                     std::set<unsigned> &OutputRegs,
5508                     std::set<unsigned> &InputRegs) {
5509  LLVMContext &Context = FuncInfo.Fn->getContext();
5510
5511  // Compute whether this value requires an input register, an output register,
5512  // or both.
5513  bool isOutReg = false;
5514  bool isInReg = false;
5515  switch (OpInfo.Type) {
5516  case InlineAsm::isOutput:
5517    isOutReg = true;
5518
5519    // If there is an input constraint that matches this, we need to reserve
5520    // the input register so no other inputs allocate to it.
5521    isInReg = OpInfo.hasMatchingInput();
5522    break;
5523  case InlineAsm::isInput:
5524    isInReg = true;
5525    isOutReg = false;
5526    break;
5527  case InlineAsm::isClobber:
5528    isOutReg = true;
5529    isInReg = true;
5530    break;
5531  }
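  // For example (illustrative): an output constraint with a matching input,
  // such as "=r" tied to a "0" input, sets both isOutReg and isInReg so the
  // register is reserved against both sets below.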
5532
5533
5534  MachineFunction &MF = DAG.getMachineFunction();
5535  SmallVector<unsigned, 4> Regs;
5536
5537  // If this is a constraint for a single physreg, or a constraint for a
5538  // register class, find it.
5539  std::pair<unsigned, const TargetRegisterClass*> PhysReg =
5540    TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
5541                                     OpInfo.ConstraintVT);
5542
5543  unsigned NumRegs = 1;
5544  if (OpInfo.ConstraintVT != MVT::Other) {
5545    // If this is a FP input in an integer register (or vice versa), insert a bit
5546    // cast of the input value.  More generally, handle any case where the input
5547    // value disagrees with the register class we plan to stick this in.
5548    if (OpInfo.Type == InlineAsm::isInput &&
5549        PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
5550      // Try to convert to the first EVT that the reg class contains.  If the
5551      // types are identical size, use a bitcast to convert (e.g. two differing
5552      // vector types).
5553      EVT RegVT = *PhysReg.second->vt_begin();
5554      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
5555        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5556                                         RegVT, OpInfo.CallOperand);
5557        OpInfo.ConstraintVT = RegVT;
5558      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
5559        // If the input is a FP value and we want it in FP registers, do a
5560        // bitcast to the corresponding integer type.  This turns an f64 value
5561        // into i64, which can be passed with two i32 values on a 32-bit
5562        // machine.
5563        RegVT = EVT::getIntegerVT(Context,
5564                                  OpInfo.ConstraintVT.getSizeInBits());
5565        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5566                                         RegVT, OpInfo.CallOperand);
5567        OpInfo.ConstraintVT = RegVT;
5568      }
5569
5570      if (DisableScheduling)
5571        DAG.AssignOrdering(OpInfo.CallOperand.getNode(), SDNodeOrder);
5572    }
5573
5574    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
5575  }
5576
5577  EVT RegVT;
5578  EVT ValueVT = OpInfo.ConstraintVT;
5579
5580  // If this is a constraint for a specific physical register, like {r17},
5581  // assign it now.
5582  if (unsigned AssignedReg = PhysReg.first) {
5583    const TargetRegisterClass *RC = PhysReg.second;
5584    if (OpInfo.ConstraintVT == MVT::Other)
5585      ValueVT = *RC->vt_begin();
5586
5587    // Get the actual register value type.  This is important, because the user
5588    // may have asked for (e.g.) the AX register in i32 type.  We need to
5589    // remember that AX is actually i16 to get the right extension.
5590    RegVT = *RC->vt_begin();
5591
5592    // This is an explicit reference to a physical register.
5593    Regs.push_back(AssignedReg);
5594
5595    // If this is an expanded reference, add the rest of the regs to Regs.
5596    if (NumRegs != 1) {
5597      TargetRegisterClass::iterator I = RC->begin();
5598      while (I != RC->end() && *I != AssignedReg)
5599        ++I;
5600      assert(I != RC->end() && "Didn't find reg!");
5600
5601      // Already added the first reg.
5602      --NumRegs; ++I;
5603      for (; NumRegs; --NumRegs, ++I) {
5604        assert(I != RC->end() && "Ran out of registers to allocate!");
5605        Regs.push_back(*I);
5606      }
5607    }
5608
5609    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5610    const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5611    OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5612    return;
5613  }
5614
5615  // Otherwise, if this was a reference to an LLVM register class, create vregs
5616  // for this reference.
5617  if (const TargetRegisterClass *RC = PhysReg.second) {
5618    RegVT = *RC->vt_begin();
5619    if (OpInfo.ConstraintVT == MVT::Other)
5620      ValueVT = RegVT;
5621
5622    // Create the appropriate number of virtual registers.
5623    MachineRegisterInfo &RegInfo = MF.getRegInfo();
5624    for (; NumRegs; --NumRegs)
5625      Regs.push_back(RegInfo.createVirtualRegister(RC));
5626
5627    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5628    return;
5629  }
5630
5631  // This is a reference to a register class that doesn't directly correspond
5632  // to an LLVM register class.  Allocate NumRegs consecutive, available,
5633  // registers from the class.
5634  std::vector<unsigned> RegClassRegs
5635    = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
5636                                            OpInfo.ConstraintVT);
5637
5638  const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5639  unsigned NumAllocated = 0;
5640  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
5641    unsigned Reg = RegClassRegs[i];
5642    // See if this register is available.
5643    if ((isOutReg && OutputRegs.count(Reg)) ||   // Already used.
5644        (isInReg  && InputRegs.count(Reg))) {    // Already used.
5645      // Make sure we find consecutive registers.
5646      NumAllocated = 0;
5647      continue;
5648    }
5649
5650    // Check to see if this register is allocatable (i.e. don't give out the
5651    // stack pointer).
5652    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
5653    if (!RC) {        // Couldn't allocate this register.
5654      // Reset NumAllocated to make sure we return consecutive registers.
5655      NumAllocated = 0;
5656      continue;
5657    }
5658
5659    // Okay, this register is good, we can use it.
5660    ++NumAllocated;
5661
5662    // If we allocated enough consecutive registers, succeed.
5663    if (NumAllocated == NumRegs) {
5664      unsigned RegStart = (i-NumAllocated)+1;
5665      unsigned RegEnd   = i+1;
5666      // Mark all of the allocated registers used.
5667      for (unsigned i = RegStart; i != RegEnd; ++i)
5668        Regs.push_back(RegClassRegs[i]);
5669
5670      OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
5671                                         OpInfo.ConstraintVT);
5672      OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5673      return;
5674    }
5675  }
5676
5677  // Otherwise, we couldn't allocate enough registers for this.
5678}
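// Summary of the three paths above (illustrative, not exhaustive): an explicit
// physreg constraint like "{r17}" is assigned directly; a register-class
// constraint like "r" gets freshly created virtual registers; anything else
// falls through to the consecutive-physreg search, which may leave
// OpInfo.AssignedRegs empty, and that failure is reported later when the
// operand is actually copied.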
5679
5680/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
5681/// processed uses a memory 'm' constraint.
5682static bool
5683hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
5684                          const TargetLowering &TLI) {
5685  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
5686    InlineAsm::ConstraintInfo &CI = CInfos[i];
5687    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
5688      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
5689      if (CType == TargetLowering::C_Memory)
5690        return true;
5691    }
5692
5693    // Indirect operands access memory.
5694    if (CI.isIndirect)
5695      return true;
5696  }
5697
5698  return false;
5699}
5700
5701/// visitInlineAsm - Handle a call to an InlineAsm object.
5702///
5703void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
5704  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
5705
5706  /// ConstraintOperands - Information about all of the constraints.
5707  std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5708
5709  std::set<unsigned> OutputRegs, InputRegs;
5710
5711  // Do a prepass over the constraints, canonicalizing them, and building up the
5712  // ConstraintOperands list.
5713  std::vector<InlineAsm::ConstraintInfo>
5714    ConstraintInfos = IA->ParseConstraints();
5715
5716  bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5717
5718  SDValue Chain, Flag;
5719
5720  // We won't need to flush pending loads if this asm doesn't touch
5721  // memory and is nonvolatile.
5722  if (hasMemory || IA->hasSideEffects())
5723    Chain = getRoot();
5724  else
5725    Chain = DAG.getRoot();
5726
5727  unsigned ArgNo = 0;   // ArgNo - The index of the next CallInst argument.
5728  unsigned ResNo = 0;   // ResNo - The result number of the next output.
5729  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5730    ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5731    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5732
5733    EVT OpVT = MVT::Other;
5734
5735    // Compute the value type for each operand.
5736    switch (OpInfo.Type) {
5737    case InlineAsm::isOutput:
5738      // Indirect outputs just consume an argument.
5739      if (OpInfo.isIndirect) {
5740        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5741        break;
5742      }
5743
5744      // The return value of the call is this value.  As such, there is no
5745      // corresponding argument.
5746      assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5747             "Bad inline asm!");
5748      if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5749        OpVT = TLI.getValueType(STy->getElementType(ResNo));
5750      } else {
5751        assert(ResNo == 0 && "Asm only has one result!");
5752        OpVT = TLI.getValueType(CS.getType());
5753      }
5754      ++ResNo;
5755      break;
5756    case InlineAsm::isInput:
5757      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5758      break;
5759    case InlineAsm::isClobber:
5760      // Nothing to do.
5761      break;
5762    }
5763
5764    // If this is an input or an indirect output, process the call argument.
5765    // BasicBlocks are labels, currently appearing only in asm's.
5766    if (OpInfo.CallOperandVal) {
5767      // Strip bitcasts, if any.  This mostly comes up for functions.
5768      OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
5769
5770      if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5771        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5772      } else {
5773        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5774      }
5775
5776      OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
5777    }
5778
5779    OpInfo.ConstraintVT = OpVT;
5780  }
5781
5782  // Second pass over the constraints: compute which constraint option to use
5783  // and assign registers to constraints that want a specific physreg.
5784  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5785    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5786
5787    // If this is an output operand with a matching input operand, look up the
5788    // matching input. If their types mismatch, e.g. one is an integer, the
5789    // other is floating point, or their sizes are different, flag it as an
5790    // error.
5791    if (OpInfo.hasMatchingInput()) {
5792      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5793      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5794        if ((OpInfo.ConstraintVT.isInteger() !=
5795             Input.ConstraintVT.isInteger()) ||
5796            (OpInfo.ConstraintVT.getSizeInBits() !=
5797             Input.ConstraintVT.getSizeInBits())) {
5798          llvm_report_error("Unsupported asm: input constraint"
5799                            " with a matching output constraint of incompatible"
5800                            " type!");
5801        }
5802        Input.ConstraintVT = OpInfo.ConstraintVT;
5803      }
5804    }
5805
5806    // Compute the constraint code and ConstraintType to use.
5807    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5808
5809    // If this is a memory input, and if the operand is not indirect, do what we
5810    // need to provide an address for the memory input.
5811    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5812        !OpInfo.isIndirect) {
5813      assert(OpInfo.Type == InlineAsm::isInput &&
5814             "Can only indirectify direct input operands!");
5815
5816      // Memory operands really want the address of the value.  If we don't have
5817      // an indirect input, put it in the constant pool if we can; otherwise spill
5818      // it to a stack slot.
5819
5820      // If the operand is a float, integer, or vector constant, spill to a
5821      // constant pool entry to get its address.
5822      Value *OpVal = OpInfo.CallOperandVal;
5823      if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5824          isa<ConstantVector>(OpVal)) {
5825        OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5826                                                 TLI.getPointerTy());
5827      } else {
5828        // Otherwise, create a stack slot and emit a store to it before the
5829        // asm.
5830        const Type *Ty = OpVal->getType();
5831        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5832        unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5833        MachineFunction &MF = DAG.getMachineFunction();
5834        int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
5835        SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5836        Chain = DAG.getStore(Chain, getCurDebugLoc(),
5837                             OpInfo.CallOperand, StackSlot, NULL, 0);
5838        OpInfo.CallOperand = StackSlot;
5839      }
5840
5841      // There is no longer a Value* corresponding to this operand.
5842      OpInfo.CallOperandVal = 0;
5843
5844      // It is now an indirect operand.
5845      OpInfo.isIndirect = true;
5846    }
5847
5848    // If this constraint is for a specific register, allocate it before
5849    // anything else.
5850    if (OpInfo.ConstraintType == TargetLowering::C_Register)
5851      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5852  }
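  // For example (illustrative): a direct "m"-constrained input holding an i32
  // constant is given a constant pool address above, while a non-constant
  // value is spilled to a fresh stack slot; either way the operand then
  // carries an address and is marked indirect.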
5853
5854  ConstraintInfos.clear();
5855
5856  // Third pass - Loop over all of the operands, assigning virtual or physical
5857  // registers to register class operands.
5858  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5859    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5860
5861    // C_Register operands have already been allocated, Other/Memory don't need
5862    // to be.
5863    if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5864      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5865  }
5866
5867  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5868  std::vector<SDValue> AsmNodeOperands;
5869  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
5870  AsmNodeOperands.push_back(
5871          DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
5872
5873
5874  // Loop over all of the inputs, copying the operand values into the
5875  // appropriate registers and processing the output regs.
5876  RegsForValue RetValRegs;
5877
5878  // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5879  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5880
5881  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5882    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5883
5884    switch (OpInfo.Type) {
5885    case InlineAsm::isOutput: {
5886      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5887          OpInfo.ConstraintType != TargetLowering::C_Register) {
5888        // Memory output, or 'other' output (e.g. 'X' constraint).
5889        assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5890
5891        // Add information to the INLINEASM node to know about this output.
5892        unsigned ResOpType = 4/*MEM*/ | (1<<3);
5893        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5894                                                        TLI.getPointerTy()));
5895        AsmNodeOperands.push_back(OpInfo.CallOperand);
5896        break;
5897      }
5898
5899      // Otherwise, this is a register or register class output.
5900
5901      // Copy the output from the appropriate register.  Find a register that
5902      // we can use.
5903      if (OpInfo.AssignedRegs.Regs.empty()) {
5904        llvm_report_error("Couldn't allocate output reg for"
5905                          " constraint '" + OpInfo.ConstraintCode + "'!");
5906      }
5907
5908      // If this is an indirect operand, store through the pointer after the
5909      // asm.
5910      if (OpInfo.isIndirect) {
5911        IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5912                                                      OpInfo.CallOperandVal));
5913      } else {
5914        // This is the result value of the call.
5915        assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5916               "Bad inline asm!");
5917        // Concatenate this output onto the outputs list.
5918        RetValRegs.append(OpInfo.AssignedRegs);
5919      }
5920
5921      // Add information to the INLINEASM node to know that this register is
5922      // set.
5923      OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5924                                               6 /* EARLYCLOBBER REGDEF */ :
5925                                               2 /* REGDEF */ ,
5926                                               false,
5927                                               0,
5928                                               DAG, SDNodeOrder,
5929                                               AsmNodeOperands);
5930      break;
5931    }
5932    case InlineAsm::isInput: {
5933      SDValue InOperandVal = OpInfo.CallOperand;
5934
5935      if (OpInfo.isMatchingInputConstraint()) {   // Matching constraint?
5936        // If this is required to match an output register we have already set,
5937        // just use its register.
5938        unsigned OperandNo = OpInfo.getMatchedOperand();
5939
5940        // Scan until we find the already-emitted definition of this operand.
5941        // When we find it, create a RegsForValue operand.
5942        unsigned CurOp = 2;  // The first operand.
5943        for (; OperandNo; --OperandNo) {
5944          // Advance to the next operand.
5945          unsigned OpFlag =
5946            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5947          assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5948                  (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5949                  (OpFlag & 7) == 4 /*MEM*/) &&
5950                 "Skipped past definitions?");
5951          CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
5952        }
5953
5954        unsigned OpFlag =
5955          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5956        if ((OpFlag & 7) == 2 /*REGDEF*/
5957            || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5958          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5959          if (OpInfo.isIndirect) {
5960            llvm_report_error("Don't know how to handle tied indirect "
5961                              "register inputs yet!");
5962          }
5963          RegsForValue MatchedRegs;
5964          MatchedRegs.TLI = &TLI;
5965          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5966          EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5967          MatchedRegs.RegVTs.push_back(RegVT);
5968          MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
5969          for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
5970               i != e; ++i)
5971            MatchedRegs.Regs.
5972              push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
5973
5974          // Use the produced MatchedRegs object to copy the value into the vregs.
5975          MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5976                                    SDNodeOrder, Chain, &Flag);
5977          MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5978                                           true, OpInfo.getMatchedOperand(),
5979                                           DAG, SDNodeOrder, AsmNodeOperands);
5980          break;
5981        } else {
5982          assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5983          assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5984                 "Unexpected number of operands");
5985          // Add information to the INLINEASM node to know about this input.
5986          // See InlineAsm.h isUseOperandTiedToDef.
5987          OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5988          AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5989                                                          TLI.getPointerTy()));
5990          AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5991          break;
5992        }
5993      }
5994
5995      if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5996        assert(!OpInfo.isIndirect &&
5997               "Don't know how to handle indirect other inputs yet!");
5998
5999        std::vector<SDValue> Ops;
6000        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
6001                                         hasMemory, Ops, DAG);
6002        if (Ops.empty()) {
6003          llvm_report_error("Invalid operand for inline asm"
6004                            " constraint '" + OpInfo.ConstraintCode + "'!");
6005        }
6006
6007        // Add information to the INLINEASM node to know about this input.
6008        unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
6009        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6010                                                        TLI.getPointerTy()));
6011        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
6012        break;
6013      } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
6014        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
6015        assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
6016               "Memory operands expect pointer values");
6017
6018        // Add information to the INLINEASM node to know about this input.
6019        unsigned ResOpType = 4/*MEM*/ | (1<<3);
6020        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6021                                                        TLI.getPointerTy()));
6022        AsmNodeOperands.push_back(InOperandVal);
6023        break;
6024      }
6025
6026      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
6027              OpInfo.ConstraintType == TargetLowering::C_Register) &&
6028             "Unknown constraint type!");
6029      assert(!OpInfo.isIndirect &&
6030             "Don't know how to handle indirect register inputs yet!");
6031
6032      // Copy the input into the appropriate registers.
6033      if (OpInfo.AssignedRegs.Regs.empty()) {
6034        llvm_report_error("Couldn't allocate input reg for"
6035                          " constraint '"+ OpInfo.ConstraintCode +"'!");
6036      }
6037
6038      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
6039                                        SDNodeOrder, Chain, &Flag);
6040
6041      OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
6042                                               DAG, SDNodeOrder,
6043                                               AsmNodeOperands);
6044      break;
6045    }
6046    case InlineAsm::isClobber: {
6047      // Add the clobbered value to the operand list, so that the register
6048      // allocator is aware that the physreg got clobbered.
6049      if (!OpInfo.AssignedRegs.Regs.empty())
6050        OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
6051                                                 false, 0, DAG, SDNodeOrder,
6052                                                 AsmNodeOperands);
6053      break;
6054    }
6055    }
6056  }
6057
6058  // Finish up input operands.
6059  AsmNodeOperands[0] = Chain;
6060  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
6061
6062  Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
6063                      DAG.getVTList(MVT::Other, MVT::Flag),
6064                      &AsmNodeOperands[0], AsmNodeOperands.size());
6065  Flag = Chain.getValue(1);
6066
6067  // If this asm returns a register value, copy the result from that register
6068  // and set it as the value of the call.
6069  if (!RetValRegs.Regs.empty()) {
6070    SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
6071                                             SDNodeOrder, Chain, &Flag);
6072
6073    // FIXME: Why don't we do this for inline asms with MRVs?
6074    if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
6075      EVT ResultType = TLI.getValueType(CS.getType());
6076
6077      // If any of the results of the inline asm is a vector, it may have the
6078      // wrong width/num elts.  This can happen for register classes that can
6079      // contain multiple different value types.  The preg or vreg allocated may
6080      // not have the same VT as was expected.  Convert it to the right type
6081      // with bit_convert.
6082      if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
6083        Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
6084                          ResultType, Val);
6085
6086      } else if (ResultType != Val.getValueType() &&
6087                 ResultType.isInteger() && Val.getValueType().isInteger()) {
6088        // If a result value was tied to an input value, the computed result may
6089        // have a wider width than the expected result.  Extract the relevant
6090        // portion.
6091        Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
6092      }
6093
6094      assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
6095    }
6096
6097    setValue(CS.getInstruction(), Val);
6098    // Don't need to use this as a chain in this case.
6099    if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
6100      return;
6101  }
6102
6103  std::vector<std::pair<SDValue, Value*> > StoresToEmit;
6104
6105  // Process indirect outputs, first output all of the flagged copies out of
6106  // physregs.
6107  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
6108    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
6109    Value *Ptr = IndirectStoresToEmit[i].second;
6110    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
6111                                             SDNodeOrder, Chain, &Flag);
6112    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
6113
6114  }
6115
6116  // Emit the non-flagged stores from the physregs.
6117  SmallVector<SDValue, 8> OutChains;
6118  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
6119    SDValue Val = DAG.getStore(Chain, getCurDebugLoc(),
6120                               StoresToEmit[i].first,
6121                               getValue(StoresToEmit[i].second),
6122                               StoresToEmit[i].second, 0);
6123    OutChains.push_back(Val);
6124  }
6125
6126  if (!OutChains.empty())
6127    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
6128                        &OutChains[0], OutChains.size());
6129
6130  DAG.setRoot(Chain);
6131}
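// Illustrative shape of the INLINEASM node built above, assuming a simple
// "=r,r" asm with one register output and one register input (a sketch, not
// an exhaustive description):
//
//   op 0: input chain
//   op 1: TargetExternalSymbol holding the asm string
//   op 2: TargetConstant 0x0A        (REGDEF (2) | one register << 3)
//   op 3: Register (the output vreg)
//   op 4: TargetConstant 0x09        (REGUSE (1) | one register << 3)
//   op 5: Register (the input vreg)
//   op 6: input flag, when copies into the input registers were glued in
//
// This is also the layout the matching-constraint scan above relies on when it
// walks operands starting at index 2.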
6132
6133void SelectionDAGBuilder::visitVAStart(CallInst &I) {
6134  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
6135                          MVT::Other, getRoot(),
6136                          getValue(I.getOperand(1)),
6137                          DAG.getSrcValue(I.getOperand(1))));
6138}
6139
6140void SelectionDAGBuilder::visitVAArg(VAArgInst &I) {
6141  SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
6142                           getRoot(), getValue(I.getOperand(0)),
6143                           DAG.getSrcValue(I.getOperand(0)));
6144  setValue(&I, V);
6145  DAG.setRoot(V.getValue(1));
6146}
6147
6148void SelectionDAGBuilder::visitVAEnd(CallInst &I) {
6149  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
6150                          MVT::Other, getRoot(),
6151                          getValue(I.getOperand(1)),
6152                          DAG.getSrcValue(I.getOperand(1))));
6153}
6154
6155void SelectionDAGBuilder::visitVACopy(CallInst &I) {
6156  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
6157                          MVT::Other, getRoot(),
6158                          getValue(I.getOperand(1)),
6159                          getValue(I.getOperand(2)),
6160                          DAG.getSrcValue(I.getOperand(1)),
6161                          DAG.getSrcValue(I.getOperand(2))));
6162}
6163
6164/// TargetLowering::LowerCallTo - This is the default LowerCallTo
6165/// implementation, which just calls LowerCall.
6166/// FIXME: When all targets are
6167/// migrated to using LowerCall, this hook should be integrated into SDISel.
6168std::pair<SDValue, SDValue>
6169TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
6170                            bool RetSExt, bool RetZExt, bool isVarArg,
6171                            bool isInreg, unsigned NumFixedArgs,
6172                            CallingConv::ID CallConv, bool isTailCall,
6173                            bool isReturnValueUsed,
6174                            SDValue Callee,
6175                            ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl,
6176                            unsigned Order) {
6177  assert((!isTailCall || PerformTailCallOpt) &&
6178         "isTailCall set when tail-call optimizations are disabled!");
6179
6180  // Handle all of the outgoing arguments.
6181  SmallVector<ISD::OutputArg, 32> Outs;
6182  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
6183    SmallVector<EVT, 4> ValueVTs;
6184    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
6185    for (unsigned Value = 0, NumValues = ValueVTs.size();
6186         Value != NumValues; ++Value) {
6187      EVT VT = ValueVTs[Value];
6188      const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
6189      SDValue Op = SDValue(Args[i].Node.getNode(),
6190                           Args[i].Node.getResNo() + Value);
6191      ISD::ArgFlagsTy Flags;
6192      unsigned OriginalAlignment =
6193        getTargetData()->getABITypeAlignment(ArgTy);
6194
6195      if (Args[i].isZExt)
6196        Flags.setZExt();
6197      if (Args[i].isSExt)
6198        Flags.setSExt();
6199      if (Args[i].isInReg)
6200        Flags.setInReg();
6201      if (Args[i].isSRet)
6202        Flags.setSRet();
6203      if (Args[i].isByVal) {
6204        Flags.setByVal();
6205        const PointerType *Ty = cast<PointerType>(Args[i].Ty);
6206        const Type *ElementTy = Ty->getElementType();
6207        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
6208        unsigned FrameSize  = getTargetData()->getTypeAllocSize(ElementTy);
6209        // For ByVal, alignment should come from the FE.  The BE will guess if
6210        // this info is not there, but there are cases it cannot get right.
6211        if (Args[i].Alignment)
6212          FrameAlign = Args[i].Alignment;
6213        Flags.setByValAlign(FrameAlign);
6214        Flags.setByValSize(FrameSize);
6215      }
6216      if (Args[i].isNest)
6217        Flags.setNest();
6218      Flags.setOrigAlign(OriginalAlignment);
6219
6220      EVT PartVT = getRegisterType(RetTy->getContext(), VT);
6221      unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
6222      SmallVector<SDValue, 4> Parts(NumParts);
6223      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
6224
6225      if (Args[i].isSExt)
6226        ExtendKind = ISD::SIGN_EXTEND;
6227      else if (Args[i].isZExt)
6228        ExtendKind = ISD::ZERO_EXTEND;
6229
6230      getCopyToParts(DAG, dl, Order, Op, &Parts[0], NumParts,
6231                     PartVT, ExtendKind);
6232
6233      for (unsigned j = 0; j != NumParts; ++j) {
6234        // If it isn't the first piece, its alignment must be 1.
6235        ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
6236        if (NumParts > 1 && j == 0)
6237          MyFlags.Flags.setSplit();
6238        else if (j != 0)
6239          MyFlags.Flags.setOrigAlign(1);
6240
6241        Outs.push_back(MyFlags);
6242      }
6243    }
6244  }
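  // For example (illustrative): an i64 argument on a 32-bit target is split
  // into two i32 parts above; the first part keeps the original alignment and
  // is marked Split, while the second part has its OrigAlign forced to 1.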
6245
6246  // Handle the incoming return values from the call.
6247  SmallVector<ISD::InputArg, 32> Ins;
6248  SmallVector<EVT, 4> RetTys;
6249  ComputeValueVTs(*this, RetTy, RetTys);
6250  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
6251    EVT VT = RetTys[I];
6252    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
6253    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
6254    for (unsigned i = 0; i != NumRegs; ++i) {
6255      ISD::InputArg MyFlags;
6256      MyFlags.VT = RegisterVT;
6257      MyFlags.Used = isReturnValueUsed;
6258      if (RetSExt)
6259        MyFlags.Flags.setSExt();
6260      if (RetZExt)
6261        MyFlags.Flags.setZExt();
6262      if (isInreg)
6263        MyFlags.Flags.setInReg();
6264      Ins.push_back(MyFlags);
6265    }
6266  }
6267
6268  // Check if target-dependent constraints permit a tail call here.
6269  // Target-independent constraints should be checked by the caller.
6270  if (isTailCall &&
6271      !IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG))
6272    isTailCall = false;
6273
6274  SmallVector<SDValue, 4> InVals;
6275  Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
6276                    Outs, Ins, dl, DAG, InVals);
6277
6278  // Verify that the target's LowerCall behaved as expected.
6279  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
6280         "LowerCall didn't return a valid chain!");
6281  assert((!isTailCall || InVals.empty()) &&
6282         "LowerCall emitted a return value for a tail call!");
6283  assert((isTailCall || InVals.size() == Ins.size()) &&
6284         "LowerCall didn't emit the correct number of values!");
6285  DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
6286          assert(InVals[i].getNode() &&
6287                 "LowerCall emitted a null value!");
6288          assert(Ins[i].VT == InVals[i].getValueType() &&
6289                 "LowerCall emitted a value with the wrong type!");
6290        });
6291
6292  if (DisableScheduling)
6293    DAG.AssignOrdering(Chain.getNode(), Order);
6294
6295  // For a tail call, the return value is merely live-out and there aren't
6296  // any nodes in the DAG representing it. Return a special value to
6297  // indicate that a tail call has been emitted and no more Instructions
6298  // should be processed in the current block.
6299  if (isTailCall) {
6300    DAG.setRoot(Chain);
6301    return std::make_pair(SDValue(), SDValue());
6302  }
6303
6304  // Collect the legal value parts into potentially illegal values
6305  // that correspond to the original function's return values.
6306  ISD::NodeType AssertOp = ISD::DELETED_NODE;
6307  if (RetSExt)
6308    AssertOp = ISD::AssertSext;
6309  else if (RetZExt)
6310    AssertOp = ISD::AssertZext;
6311  SmallVector<SDValue, 4> ReturnValues;
6312  unsigned CurReg = 0;
6313  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
6314    EVT VT = RetTys[I];
6315    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
6316    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
6317
6318    SDValue ReturnValue =
6319      getCopyFromParts(DAG, dl, Order, &InVals[CurReg], NumRegs,
6320                       RegisterVT, VT, AssertOp);
6321    ReturnValues.push_back(ReturnValue);
6322    if (DisableScheduling)
6323      DAG.AssignOrdering(ReturnValue.getNode(), Order);
6324    CurReg += NumRegs;
6325  }
6326
6327  // For a function returning void, there is no return value. We can't create
6328  // such a node, so we just return a null return value in that case; nothing
6329  // will actually look at the value.
6330  if (ReturnValues.empty())
6331    return std::make_pair(SDValue(), Chain);
6332
6333  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
6334                            DAG.getVTList(&RetTys[0], RetTys.size()),
6335                            &ReturnValues[0], ReturnValues.size());
6336  if (DisableScheduling)
6337    DAG.AssignOrdering(Res.getNode(), Order);
6338  return std::make_pair(Res, Chain);
6339}
6340
6341void TargetLowering::LowerOperationWrapper(SDNode *N,
6342                                           SmallVectorImpl<SDValue> &Results,
6343                                           SelectionDAG &DAG) {
6344  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
6345  if (Res.getNode())
6346    Results.push_back(Res);
6347}
6348
6349SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
6350  llvm_unreachable("LowerOperation not implemented for this target!");
6351  return SDValue();
6352}
6353
6354void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
6355  SDValue Op = getValue(V);
6356  assert((Op.getOpcode() != ISD::CopyFromReg ||
6357          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
6358         "Copy from a reg to the same reg!");
6359  assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
6360
6361  RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
6362  SDValue Chain = DAG.getEntryNode();
6363  RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), SDNodeOrder, Chain, 0);
6364  PendingExports.push_back(Chain);
6365}
6366
6367#include "llvm/CodeGen/SelectionDAGISel.h"
6368
6369void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
6370  // If this is the entry block, emit arguments.
6371  Function &F = *LLVMBB->getParent();
6372  SelectionDAG &DAG = SDB->DAG;
6373  SDValue OldRoot = DAG.getRoot();
6374  DebugLoc dl = SDB->getCurDebugLoc();
6375  const TargetData *TD = TLI.getTargetData();
6376  SmallVector<ISD::InputArg, 16> Ins;
6377
6378  // Check whether the function can return without sret-demotion.
6379  SmallVector<EVT, 4> OutVTs;
6380  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
6381  getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
6382                OutVTs, OutsFlags, TLI);
6383  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
6384
6385  FLI.CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
6386                                          OutVTs, OutsFlags, DAG);
6387  if (!FLI.CanLowerReturn) {
6388    // Put in an sret pointer parameter before all the other parameters.
6389    SmallVector<EVT, 1> ValueVTs;
6390    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
6391
6392    // NOTE: Assuming that a pointer will never break down to more than one VT
6393    // or one register.
6394    ISD::ArgFlagsTy Flags;
6395    Flags.setSRet();
6396    EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), ValueVTs[0]);
6397    ISD::InputArg RetArg(Flags, RegisterVT, true);
6398    Ins.push_back(RetArg);
6399  }
6400
6401  // Set up the incoming argument description vector.
6402  unsigned Idx = 1;
6403  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
6404       I != E; ++I, ++Idx) {
6405    SmallVector<EVT, 4> ValueVTs;
6406    ComputeValueVTs(TLI, I->getType(), ValueVTs);
6407    bool isArgValueUsed = !I->use_empty();
6408    for (unsigned Value = 0, NumValues = ValueVTs.size();
6409         Value != NumValues; ++Value) {
6410      EVT VT = ValueVTs[Value];
6411      const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
6412      ISD::ArgFlagsTy Flags;
6413      unsigned OriginalAlignment =
6414        TD->getABITypeAlignment(ArgTy);
6415
6416      if (F.paramHasAttr(Idx, Attribute::ZExt))
6417        Flags.setZExt();
6418      if (F.paramHasAttr(Idx, Attribute::SExt))
6419        Flags.setSExt();
6420      if (F.paramHasAttr(Idx, Attribute::InReg))
6421        Flags.setInReg();
6422      if (F.paramHasAttr(Idx, Attribute::StructRet))
6423        Flags.setSRet();
6424      if (F.paramHasAttr(Idx, Attribute::ByVal)) {
6425        Flags.setByVal();
6426        const PointerType *Ty = cast<PointerType>(I->getType());
6427        const Type *ElementTy = Ty->getElementType();
6428        unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
6429        unsigned FrameSize  = TD->getTypeAllocSize(ElementTy);
6430        // For ByVal, alignment should be passed from the FE.  The BE will guess
6431        // if this info is not there, but there are cases it cannot get right.
6432        if (F.getParamAlignment(Idx))
6433          FrameAlign = F.getParamAlignment(Idx);
6434        Flags.setByValAlign(FrameAlign);
6435        Flags.setByValSize(FrameSize);
6436      }
6437      if (F.paramHasAttr(Idx, Attribute::Nest))
6438        Flags.setNest();
6439      Flags.setOrigAlign(OriginalAlignment);
6440
6441      EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
6442      unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
6443      for (unsigned i = 0; i != NumRegs; ++i) {
6444        ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
6445        if (NumRegs > 1 && i == 0)
6446          MyFlags.Flags.setSplit();
6447        // If it isn't the first piece, its alignment must be 1.
6448        else if (i > 0)
6449          MyFlags.Flags.setOrigAlign(1);
6450        Ins.push_back(MyFlags);
6451      }
6452    }
6453  }
6454
6455  // Call the target to set up the argument values.
6456  SmallVector<SDValue, 8> InVals;
6457  SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
6458                                             F.isVarArg(), Ins,
6459                                             dl, DAG, InVals);
6460
6461  // Verify that the target's LowerFormalArguments behaved as expected.
6462  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
6463         "LowerFormalArguments didn't return a valid chain!");
6464  assert(InVals.size() == Ins.size() &&
6465         "LowerFormalArguments didn't emit the correct number of values!");
6466  DEBUG({
6467      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
6468        assert(InVals[i].getNode() &&
6469               "LowerFormalArguments emitted a null value!");
6470        assert(Ins[i].VT == InVals[i].getValueType() &&
6471               "LowerFormalArguments emitted a value with the wrong type!");
6472      }
6473    });
6474
6475  // Update the DAG with the new chain value resulting from argument lowering.
6476  DAG.setRoot(NewRoot);
6477
6478  // Set up the argument values.
6479  unsigned i = 0;
6480  Idx = 1;
6481  if (!FLI.CanLowerReturn) {
6482    // Create a virtual register for the sret pointer, and put in a copy
6483    // from the sret argument into it.
6484    SmallVector<EVT, 1> ValueVTs;
6485    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
6486    EVT VT = ValueVTs[0];
6487    EVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
6488    ISD::NodeType AssertOp = ISD::DELETED_NODE;
6489    SDValue ArgValue = getCopyFromParts(DAG, dl, 0, &InVals[0], 1,
6490                                        RegVT, VT, AssertOp);
6491
6492    MachineFunction& MF = SDB->DAG.getMachineFunction();
6493    MachineRegisterInfo& RegInfo = MF.getRegInfo();
6494    unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT));
6495    FLI.DemoteRegister = SRetReg;
6496    NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(), SRetReg, ArgValue);
6497    DAG.setRoot(NewRoot);
6498
6499    // i indexes lowered arguments.  Bump it past the hidden sret argument.
6500    // Idx indexes LLVM arguments.  Don't touch it.
6501    ++i;
6502  }
6503
6504  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
6505      ++I, ++Idx) {
6506    SmallVector<SDValue, 4> ArgValues;
6507    SmallVector<EVT, 4> ValueVTs;
6508    ComputeValueVTs(TLI, I->getType(), ValueVTs);
6509    unsigned NumValues = ValueVTs.size();
6510    for (unsigned Value = 0; Value != NumValues; ++Value) {
6511      EVT VT = ValueVTs[Value];
6512      EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
6513      unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
6514
6515      if (!I->use_empty()) {
6516        ISD::NodeType AssertOp = ISD::DELETED_NODE;
6517        if (F.paramHasAttr(Idx, Attribute::SExt))
6518          AssertOp = ISD::AssertSext;
6519        else if (F.paramHasAttr(Idx, Attribute::ZExt))
6520          AssertOp = ISD::AssertZext;
6521
6522        ArgValues.push_back(getCopyFromParts(DAG, dl, 0, &InVals[i],
6523                                             NumParts, PartVT, VT,
6524                                             AssertOp));
6525      }
6526
6527      i += NumParts;
6528    }
6529
6530    if (!I->use_empty()) {
6531      SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues,
6532                                       SDB->getCurDebugLoc());
6533      SDB->setValue(I, Res);
6534
6535      // If this argument is live outside of the entry block, insert a copy from
6536      // wherever we got it to the vreg that other BBs will reference it as.
6537      SDB->CopyToExportRegsIfNeeded(I);
6538    }
6539  }
6540
6541  assert(i == InVals.size() && "Argument register count mismatch!");
6542
6543  // Finally, if the target has anything special to do, allow it to do so.
6544  // FIXME: this should insert code into the DAG!
6545  EmitFunctionEntryCode(F, SDB->DAG.getMachineFunction());
6546}
6547
6548/// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
6549/// ensure constants are generated when needed.  Remember the virtual registers
6550/// that need to be added to the Machine PHI nodes as input.  We cannot just
6551/// directly add them, because expansion might result in multiple MBB's for one
6552/// BB.  As such, the start of the BB might correspond to a different MBB than
6553/// the end.
6554///
6555void
6556SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
6557  TerminatorInst *TI = LLVMBB->getTerminator();
6558
6559  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
6560
6561  // Check successor nodes' PHI nodes that expect a constant to be available
6562  // from this block.
6563  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
6564    BasicBlock *SuccBB = TI->getSuccessor(succ);
6565    if (!isa<PHINode>(SuccBB->begin())) continue;
6566    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
6567
6568    // If this terminator has multiple identical successors (common for
6569    // switches), only handle each succ once.
6570    if (!SuccsHandled.insert(SuccMBB)) continue;
6571
6572    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
6573    PHINode *PN;
6574
6575    // At this point we know that there is a 1-1 correspondence between LLVM PHI
6576    // nodes and Machine PHI nodes, but the incoming operands have not been
6577    // emitted yet.
6578    for (BasicBlock::iterator I = SuccBB->begin();
6579         (PN = dyn_cast<PHINode>(I)); ++I) {
6580      // Ignore dead phi's.
6581      if (PN->use_empty()) continue;
6582
6583      unsigned Reg;
6584      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6585
6586      if (Constant *C = dyn_cast<Constant>(PHIOp)) {
6587        unsigned &RegOut = SDB->ConstantsOut[C];
6588        if (RegOut == 0) {
6589          RegOut = FuncInfo->CreateRegForValue(C);
6590          SDB->CopyValueToVirtualRegister(C, RegOut);
6591        }
6592        Reg = RegOut;
6593      } else {
6594        Reg = FuncInfo->ValueMap[PHIOp];
6595        if (Reg == 0) {
6596          assert(isa<AllocaInst>(PHIOp) &&
6597                 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
6598                 "Didn't codegen value into a register!??");
6599          Reg = FuncInfo->CreateRegForValue(PHIOp);
6600          SDB->CopyValueToVirtualRegister(PHIOp, Reg);
6601        }
6602      }
6603
6604      // Remember that this register needs to be added to the machine PHI node as
6605      // the input for this MBB.
6606      SmallVector<EVT, 4> ValueVTs;
6607      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
6608      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
6609        EVT VT = ValueVTs[vti];
6610        unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
6611        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
6612          SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
6613        Reg += NumRegisters;
6614      }
6615    }
6616  }
6617  SDB->ConstantsOut.clear();
6618}
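// For example (illustrative): an i64 PHI on a 32-bit target occupies two
// consecutive virtual registers and two machine PHI nodes, so the loop above
// records two (machine PHI, vreg) pairs in PHINodesToUpdate, one per part.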
6619
6620/// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
6621/// supports legal types, and it emits MachineInstrs directly instead of
6622/// creating SelectionDAG nodes.
6623///
6624bool
6625SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
6626                                                      FastISel *F) {
6627  TerminatorInst *TI = LLVMBB->getTerminator();
6628
6629  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
6630  unsigned OrigNumPHINodesToUpdate = SDB->PHINodesToUpdate.size();
6631
6632  // Check successor nodes' PHI nodes that expect a constant to be available
6633  // from this block.
6634  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
6635    BasicBlock *SuccBB = TI->getSuccessor(succ);
6636    if (!isa<PHINode>(SuccBB->begin())) continue;
6637    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
6638
6639    // If this terminator has multiple identical successors (common for
6640    // switches), only handle each succ once.
6641    if (!SuccsHandled.insert(SuccMBB)) continue;
6642
6643    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
6644    PHINode *PN;
6645
6646    // At this point we know that there is a 1-1 correspondence between LLVM PHI
6647    // nodes and Machine PHI nodes, but the incoming operands have not been
6648    // emitted yet.
6649    for (BasicBlock::iterator I = SuccBB->begin();
6650         (PN = dyn_cast<PHINode>(I)); ++I) {
6651      // Ignore dead phi's.
6652      if (PN->use_empty()) continue;
6653
6654      // Only handle legal types. Two interesting things to note here. First,
6655      // by bailing out early, we may leave behind some dead instructions,
6656      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
6657      // own moves. Second, this check is necessary because FastISel doesn't
6658      // use CreateRegForValue to create registers, so it always creates
6659      // exactly one register for each non-void instruction.
6660      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
6661      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
6662        // Promote MVT::i1.
6663        if (VT == MVT::i1)
6664          VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
6665        else {
6666          SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6667          return false;
6668        }
6669      }
6670
6671      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6672
6673      unsigned Reg = F->getRegForValue(PHIOp);
6674      if (Reg == 0) {
6675        SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6676        return false;
6677      }
6678      SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
6679    }
6680  }
6681
6682  return true;
6683}
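// For example (illustrative): an i1 PHI is still handled here because it is
// promoted (e.g. to i8 on a typical x86 target), whereas a PHI of an illegal
// type such as i128 on a 32-bit target makes this function bail out and fall
// back to the SelectionDAG path, undoing any entries it already added.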
6684