SelectionDAGBuilder.cpp revision bf0ca2b477e761e2c81f6c36d6c7bec055933b15
1//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements routines for translating from LLVM IR into SelectionDAG IR.
11//
12//===----------------------------------------------------------------------===//
13
14#define DEBUG_TYPE "isel"
15#include "SelectionDAGBuilder.h"
16#include "FunctionLoweringInfo.h"
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/SmallSet.h"
19#include "llvm/Analysis/AliasAnalysis.h"
20#include "llvm/Analysis/ConstantFolding.h"
21#include "llvm/Constants.h"
22#include "llvm/CallingConv.h"
23#include "llvm/DerivedTypes.h"
24#include "llvm/Function.h"
25#include "llvm/GlobalVariable.h"
26#include "llvm/InlineAsm.h"
27#include "llvm/Instructions.h"
28#include "llvm/Intrinsics.h"
29#include "llvm/IntrinsicInst.h"
30#include "llvm/Module.h"
31#include "llvm/CodeGen/FastISel.h"
32#include "llvm/CodeGen/GCStrategy.h"
33#include "llvm/CodeGen/GCMetadata.h"
34#include "llvm/CodeGen/MachineFunction.h"
35#include "llvm/CodeGen/MachineFrameInfo.h"
36#include "llvm/CodeGen/MachineInstrBuilder.h"
37#include "llvm/CodeGen/MachineJumpTableInfo.h"
38#include "llvm/CodeGen/MachineModuleInfo.h"
39#include "llvm/CodeGen/MachineRegisterInfo.h"
40#include "llvm/CodeGen/PseudoSourceValue.h"
41#include "llvm/CodeGen/SelectionDAG.h"
42#include "llvm/CodeGen/DwarfWriter.h"
43#include "llvm/Analysis/DebugInfo.h"
44#include "llvm/Target/TargetRegisterInfo.h"
45#include "llvm/Target/TargetData.h"
46#include "llvm/Target/TargetFrameInfo.h"
47#include "llvm/Target/TargetInstrInfo.h"
48#include "llvm/Target/TargetIntrinsicInfo.h"
49#include "llvm/Target/TargetLowering.h"
50#include "llvm/Target/TargetOptions.h"
51#include "llvm/Support/Compiler.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/MathExtras.h"
56#include "llvm/Support/raw_ostream.h"
57#include <algorithm>
58using namespace llvm;
59
60/// LimitFloatPrecision - Generate low-precision inline sequences for
61/// some float libcalls (6, 8 or 12 bits).
62static unsigned LimitFloatPrecision;
63
64static cl::opt<unsigned, true>
65LimitFPPrecision("limit-float-precision",
66                 cl::desc("Generate low-precision inline sequences "
67                          "for some float libcalls"),
68                 cl::location(LimitFloatPrecision),
69                 cl::init(0));
70
71namespace {
72  /// RegsForValue - This struct represents the registers (physical or virtual)
73  /// that a particular set of values is assigned to, and the type information about
74  /// the value. The most common situation is to represent one value at a time,
75  /// but struct or array values are handled element-wise as multiple values.
76  /// The splitting of aggregates is performed recursively, so that we never
77  /// have aggregate-typed registers. The values at this point do not necessarily
78  /// have legal types, so each value may require one or more registers of some
79  /// legal type.
80  ///
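  /// For example (assuming a 32-bit target where an IR i64 is legalized to
  /// two i32 registers), an i64 value assigned to virtual register R would
  /// be recorded here as ValueVTs = { i64 }, RegVTs = { i32 } and
  /// Regs = { R, R+1 }.
  ///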
81  struct RegsForValue {
82    /// TLI - The TargetLowering object.
83    ///
84    const TargetLowering *TLI;
85
86    /// ValueVTs - The value types of the values, which may not be legal, and
87    /// may need to be promoted or synthesized from one or more registers.
88    ///
89    SmallVector<EVT, 4> ValueVTs;
90
91    /// RegVTs - The value types of the registers. This is the same size as
92    /// ValueVTs and it records, for each value, what the type of the assigned
93    /// register or registers are. (Individual values are never synthesized
94    /// from more than one type of register.)
95    ///
96    /// With virtual registers, the contents of RegVTs are redundant with TLI's
97    /// getRegisterType member function; however, with physical registers it is
98    /// necessary to have a separate record of the types.
99    ///
100    SmallVector<EVT, 4> RegVTs;
101
102    /// Regs - This list holds the registers assigned to the values.
103    /// Each legal or promoted value requires one register, and each
104    /// expanded value requires multiple registers.
105    ///
106    SmallVector<unsigned, 4> Regs;
107
108    RegsForValue() : TLI(0) {}
109
110    RegsForValue(const TargetLowering &tli,
111                 const SmallVector<unsigned, 4> &regs,
112                 EVT regvt, EVT valuevt)
113      : TLI(&tli),  ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
114    RegsForValue(const TargetLowering &tli,
115                 const SmallVector<unsigned, 4> &regs,
116                 const SmallVector<EVT, 4> &regvts,
117                 const SmallVector<EVT, 4> &valuevts)
118      : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
119    RegsForValue(LLVMContext &Context, const TargetLowering &tli,
120                 unsigned Reg, const Type *Ty) : TLI(&tli) {
121      ComputeValueVTs(tli, Ty, ValueVTs);
122
123      for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
124        EVT ValueVT = ValueVTs[Value];
125        unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
126        EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
127        for (unsigned i = 0; i != NumRegs; ++i)
128          Regs.push_back(Reg + i);
129        RegVTs.push_back(RegisterVT);
130        Reg += NumRegs;
131      }
132    }
133
134    /// append - Add the specified values to this one.
135    void append(const RegsForValue &RHS) {
136      TLI = RHS.TLI;
137      ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
138      RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
139      Regs.append(RHS.Regs.begin(), RHS.Regs.end());
140    }
141
142
143    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from
144    /// this value and return the result as a ValueVTs value.  This uses
145    /// Chain/Flag as the input and updates them for the output Chain/Flag.
146    /// If the Flag pointer is NULL, no flag is used.
147    SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, unsigned Order,
148                            SDValue &Chain, SDValue *Flag) const;
149
150    /// getCopyToRegs - Emit a series of CopyToReg nodes that copy the
151    /// specified value into the registers specified by this object.  This uses
152    /// Chain/Flag as the input and updates them for the output Chain/Flag.
153    /// If the Flag pointer is NULL, no flag is used.
154    void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
155                       unsigned Order, SDValue &Chain, SDValue *Flag) const;
156
157    /// AddInlineAsmOperands - Add this value to the specified inlineasm node
158    /// operand list.  This adds the code marker, matching input operand index
159    /// (if applicable), and includes the number of values added into it.
160    void AddInlineAsmOperands(unsigned Code,
161                              bool HasMatching, unsigned MatchingIdx,
162                              SelectionDAG &DAG, unsigned Order,
163                              std::vector<SDValue> &Ops) const;
164  };
165}
166
167/// getCopyFromParts - Create a value that contains the specified legal parts
168/// combined into the value they represent.  If the parts combine to a type
169/// larger than ValueVT, then AssertOp can be used to specify whether the extra
170/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
171/// (ISD::AssertSext).
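/// For example (assuming two i32 parts holding an i64 value on a
/// little-endian target), the parts are reassembled roughly as
///   Val = BUILD_PAIR i64 (Parts[0], Parts[1])
/// while a single i16 part carrying an i8 value is narrowed with TRUNCATE,
/// optionally preceded by the AssertZext/AssertSext requested via AssertOp.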
172static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, unsigned Order,
173                                const SDValue *Parts,
174                                unsigned NumParts, EVT PartVT, EVT ValueVT,
175                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
176  assert(NumParts > 0 && "No parts to assemble!");
177  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
178  SDValue Val = Parts[0];
179  if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
180
181  if (NumParts > 1) {
182    // Assemble the value from multiple parts.
183    if (!ValueVT.isVector() && ValueVT.isInteger()) {
184      unsigned PartBits = PartVT.getSizeInBits();
185      unsigned ValueBits = ValueVT.getSizeInBits();
186
187      // Assemble the power of 2 part.
188      unsigned RoundParts = NumParts & (NumParts - 1) ?
189        1 << Log2_32(NumParts) : NumParts;
190      unsigned RoundBits = PartBits * RoundParts;
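      // Worked example (hypothetical i96 value held in three i32 parts):
      // RoundParts is 2 and RoundBits is 64, so the first two parts form an
      // i64 and the remaining odd part is shifted and OR'd in below.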
191      EVT RoundVT = RoundBits == ValueBits ?
192        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
193      SDValue Lo, Hi;
194
195      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
196
197      if (RoundParts > 2) {
198        Lo = getCopyFromParts(DAG, dl, Order, Parts, RoundParts / 2,
199                              PartVT, HalfVT);
200        Hi = getCopyFromParts(DAG, dl, Order, Parts + RoundParts / 2,
201                              RoundParts / 2, PartVT, HalfVT);
202      } else {
203        Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
204        Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
205      }
206
207      if (TLI.isBigEndian())
208        std::swap(Lo, Hi);
209
210      Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
211
212      if (DisableScheduling) {
213        DAG.AssignOrdering(Lo.getNode(), Order);
214        DAG.AssignOrdering(Hi.getNode(), Order);
215        DAG.AssignOrdering(Val.getNode(), Order);
216      }
217
218      if (RoundParts < NumParts) {
219        // Assemble the trailing non-power-of-2 part.
220        unsigned OddParts = NumParts - RoundParts;
221        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
222        Hi = getCopyFromParts(DAG, dl, Order,
223                              Parts + RoundParts, OddParts, PartVT, OddVT);
224
225        // Combine the round and odd parts.
226        Lo = Val;
227        if (TLI.isBigEndian())
228          std::swap(Lo, Hi);
229        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
230        Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
231        if (DisableScheduling) DAG.AssignOrdering(Hi.getNode(), Order);
232        Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
233                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
234                                         TLI.getPointerTy()));
235        if (DisableScheduling) DAG.AssignOrdering(Hi.getNode(), Order);
236        Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
237        if (DisableScheduling) DAG.AssignOrdering(Lo.getNode(), Order);
238        Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
239        if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
240      }
241    } else if (ValueVT.isVector()) {
242      // Handle a multi-element vector.
243      EVT IntermediateVT, RegisterVT;
244      unsigned NumIntermediates;
245      unsigned NumRegs =
246        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
247                                   NumIntermediates, RegisterVT);
248      assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
249      NumParts = NumRegs; // Silence a compiler warning.
250      assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
251      assert(RegisterVT == Parts[0].getValueType() &&
252             "Part type doesn't match part!");
253
254      // Assemble the parts into intermediate operands.
255      SmallVector<SDValue, 8> Ops(NumIntermediates);
256      if (NumIntermediates == NumParts) {
257        // If the register was not expanded, truncate or copy the value,
258        // as appropriate.
259        for (unsigned i = 0; i != NumParts; ++i)
260          Ops[i] = getCopyFromParts(DAG, dl, Order, &Parts[i], 1,
261                                    PartVT, IntermediateVT);
262      } else if (NumParts > 0) {
263        // If the intermediate type was expanded, build the intermediate operands
264        // from the parts.
265        assert(NumParts % NumIntermediates == 0 &&
266               "Must expand into a divisible number of parts!");
267        unsigned Factor = NumParts / NumIntermediates;
268        for (unsigned i = 0; i != NumIntermediates; ++i)
269          Ops[i] = getCopyFromParts(DAG, dl, Order, &Parts[i * Factor], Factor,
270                                    PartVT, IntermediateVT);
271      }
272
273      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
274      // operands.
275      Val = DAG.getNode(IntermediateVT.isVector() ?
276                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
277                        ValueVT, &Ops[0], NumIntermediates);
278      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
279    } else if (PartVT.isFloatingPoint()) {
280      // FP split into multiple FP parts (for ppcf128)
281      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
282             "Unexpected split");
283      SDValue Lo, Hi;
284      Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
285      Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
286      if (TLI.isBigEndian())
287        std::swap(Lo, Hi);
288      Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
289
290      if (DisableScheduling) {
291        DAG.AssignOrdering(Hi.getNode(), Order);
292        DAG.AssignOrdering(Lo.getNode(), Order);
293        DAG.AssignOrdering(Val.getNode(), Order);
294      }
295    } else {
296      // FP split into integer parts (soft fp)
297      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
298             !PartVT.isVector() && "Unexpected split");
299      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
300      Val = getCopyFromParts(DAG, dl, Order, Parts, NumParts, PartVT, IntVT);
301    }
302  }
303
304  // There is now one part, held in Val.  Correct it to match ValueVT.
305  PartVT = Val.getValueType();
306
307  if (PartVT == ValueVT)
308    return Val;
309
310  if (PartVT.isVector()) {
311    assert(ValueVT.isVector() && "Unknown vector conversion!");
312    SDValue Res = DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
313    if (DisableScheduling)
314      DAG.AssignOrdering(Res.getNode(), Order);
315    return Res;
316  }
317
318  if (ValueVT.isVector()) {
319    assert(ValueVT.getVectorElementType() == PartVT &&
320           ValueVT.getVectorNumElements() == 1 &&
321           "Only trivial scalar-to-vector conversions should get here!");
322    SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
323    if (DisableScheduling)
324      DAG.AssignOrdering(Res.getNode(), Order);
325    return Res;
326  }
327
328  if (PartVT.isInteger() &&
329      ValueVT.isInteger()) {
330    if (ValueVT.bitsLT(PartVT)) {
331      // For a truncate, see if we have any information to
332      // indicate whether the truncated bits will always be
333      // zero or sign-extension.
334      if (AssertOp != ISD::DELETED_NODE)
335        Val = DAG.getNode(AssertOp, dl, PartVT, Val,
336                          DAG.getValueType(ValueVT));
337      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
338      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
339      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
340      return Val;
341    } else {
342      Val = DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
343      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
344      return Val;
345    }
346  }
347
348  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
349    if (ValueVT.bitsLT(Val.getValueType())) {
350      // FP_ROUND's are always exact here.
351      Val = DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
352                        DAG.getIntPtrConstant(1));
353      if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
354      return Val;
355    }
356
357    Val = DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
358    if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
359    return Val;
360  }
361
362  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
363    Val = DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
364    if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
365    return Val;
366  }
367
368  llvm_unreachable("Unknown mismatch!");
369  return SDValue();
370}
371
372/// getCopyToParts - Create a series of nodes that contain the specified value
373/// split into legal parts.  If the parts contain more bits than Val, then, for
374/// integers, ExtendKind can be used to specify how to generate the extra bits.
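/// For example (assuming an i64 value copied into two i32 parts), the value
/// is bit-converted to an integer of the full width and then repeatedly
/// bisected with EXTRACT_ELEMENT, while an i8 value copied into a single i16
/// part is widened using ExtendKind (ANY_EXTEND unless the caller specifies
/// otherwise).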
375static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, unsigned Order,
376                           SDValue Val, SDValue *Parts, unsigned NumParts,
377                           EVT PartVT,
378                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
379  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
380  EVT PtrVT = TLI.getPointerTy();
381  EVT ValueVT = Val.getValueType();
382  unsigned PartBits = PartVT.getSizeInBits();
383  unsigned OrigNumParts = NumParts;
384  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
385
386  if (!NumParts)
387    return;
388
389  if (!ValueVT.isVector()) {
390    if (PartVT == ValueVT) {
391      assert(NumParts == 1 && "No-op copy with multiple parts!");
392      Parts[0] = Val;
393      return;
394    }
395
396    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
397      // If the parts cover more bits than the value has, promote the value.
398      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
399        assert(NumParts == 1 && "Do not know what to promote to!");
400        Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
401      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
402        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
403        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
404      } else {
405        llvm_unreachable("Unknown mismatch!");
406      }
407    } else if (PartBits == ValueVT.getSizeInBits()) {
408      // Different types of the same size.
409      assert(NumParts == 1 && PartVT != ValueVT);
410      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
411    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
412      // If the parts cover fewer bits than the value has, truncate the value.
413      if (PartVT.isInteger() && ValueVT.isInteger()) {
414        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
415        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
416      } else {
417        llvm_unreachable("Unknown mismatch!");
418      }
419    }
420
421    if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order);
422
423    // The value may have changed - recompute ValueVT.
424    ValueVT = Val.getValueType();
425    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
426           "Failed to tile the value with PartVT!");
427
428    if (NumParts == 1) {
429      assert(PartVT == ValueVT && "Type conversion failed!");
430      Parts[0] = Val;
431      return;
432    }
433
434    // Expand the value into multiple parts.
435    if (NumParts & (NumParts - 1)) {
436      // The number of parts is not a power of 2.  Split off and copy the tail.
437      assert(PartVT.isInteger() && ValueVT.isInteger() &&
438             "Do not know what to expand to!");
439      unsigned RoundParts = 1 << Log2_32(NumParts);
440      unsigned RoundBits = RoundParts * PartBits;
441      unsigned OddParts = NumParts - RoundParts;
442      SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
443                                   DAG.getConstant(RoundBits,
444                                                   TLI.getPointerTy()));
445      getCopyToParts(DAG, dl, Order, OddVal, Parts + RoundParts,
446                     OddParts, PartVT);
447
448      if (TLI.isBigEndian())
449        // The odd parts were reversed by getCopyToParts - unreverse them.
450        std::reverse(Parts + RoundParts, Parts + NumParts);
451
452      NumParts = RoundParts;
453      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
454      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
455
456      if (DisableScheduling) {
457        DAG.AssignOrdering(OddVal.getNode(), Order);
458        DAG.AssignOrdering(Val.getNode(), Order);
459      }
460    }
461
462    // The number of parts is a power of 2.  Repeatedly bisect the value using
463    // EXTRACT_ELEMENT.
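    // Worked example (hypothetical i128 value split into four i32 parts):
    // the first pass extracts the low and high i64 halves into Parts[0] and
    // Parts[2], and the second pass splits each of those into two i32 parts.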
464    Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
465                           EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()),
466                           Val);
467
468    if (DisableScheduling)
469      DAG.AssignOrdering(Parts[0].getNode(), Order);
470
471    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
472      for (unsigned i = 0; i < NumParts; i += StepSize) {
473        unsigned ThisBits = StepSize * PartBits / 2;
474        EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
475        SDValue &Part0 = Parts[i];
476        SDValue &Part1 = Parts[i+StepSize/2];
477
478        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
479                            ThisVT, Part0,
480                            DAG.getConstant(1, PtrVT));
481        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
482                            ThisVT, Part0,
483                            DAG.getConstant(0, PtrVT));
484
485        if (DisableScheduling) {
486          DAG.AssignOrdering(Part0.getNode(), Order);
487          DAG.AssignOrdering(Part1.getNode(), Order);
488        }
489
490        if (ThisBits == PartBits && ThisVT != PartVT) {
491          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
492                              PartVT, Part0);
493          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
494                              PartVT, Part1);
495          if (DisableScheduling) {
496            DAG.AssignOrdering(Part0.getNode(), Order);
497            DAG.AssignOrdering(Part1.getNode(), Order);
498          }
499        }
500      }
501    }
502
503    if (TLI.isBigEndian())
504      std::reverse(Parts, Parts + OrigNumParts);
505
506    return;
507  }
508
509  // Vector ValueVT.
510  if (NumParts == 1) {
511    if (PartVT != ValueVT) {
512      if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
513        Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
514      } else {
515        assert(ValueVT.getVectorElementType() == PartVT &&
516               ValueVT.getVectorNumElements() == 1 &&
517               "Only trivial vector-to-scalar conversions should get here!");
518        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
519                          PartVT, Val,
520                          DAG.getConstant(0, PtrVT));
521      }
522    }
523
524    if (DisableScheduling)
525      DAG.AssignOrdering(Val.getNode(), Order);
526
527    Parts[0] = Val;
528    return;
529  }
530
531  // Handle a multi-element vector.
532  EVT IntermediateVT, RegisterVT;
533  unsigned NumIntermediates;
534  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
535                              IntermediateVT, NumIntermediates, RegisterVT);
536  unsigned NumElements = ValueVT.getVectorNumElements();
537
538  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
539  NumParts = NumRegs; // Silence a compiler warning.
540  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
541
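  // Worked example (hypothetical v4i64 value on a target whose widest legal
  // vector type is v2i64): getVectorTypeBreakdown reports IntermediateVT =
  // v2i64 with NumIntermediates = 2, so the loop below extracts two v2i64
  // subvectors at element offsets 0 and 2.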
542  // Split the vector into intermediate operands.
543  SmallVector<SDValue, 8> Ops(NumIntermediates);
544  for (unsigned i = 0; i != NumIntermediates; ++i) {
545    if (IntermediateVT.isVector())
546      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
547                           IntermediateVT, Val,
548                           DAG.getConstant(i * (NumElements / NumIntermediates),
549                                           PtrVT));
550    else
551      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
552                           IntermediateVT, Val,
553                           DAG.getConstant(i, PtrVT));
554
555    if (DisableScheduling)
556      DAG.AssignOrdering(Ops[i].getNode(), Order);
557  }
558
559  // Split the intermediate operands into legal parts.
560  if (NumParts == NumIntermediates) {
561    // If the register was not expanded, promote or copy the value,
562    // as appropriate.
563    for (unsigned i = 0; i != NumParts; ++i)
564      getCopyToParts(DAG, dl, Order, Ops[i], &Parts[i], 1, PartVT);
565  } else if (NumParts > 0) {
566    // If the intermediate type was expanded, split each value into
567    // legal parts.
568    assert(NumParts % NumIntermediates == 0 &&
569           "Must expand into a divisible number of parts!");
570    unsigned Factor = NumParts / NumIntermediates;
571    for (unsigned i = 0; i != NumIntermediates; ++i)
572      getCopyToParts(DAG, dl, Order, Ops[i], &Parts[i*Factor], Factor, PartVT);
573  }
574}
575
576
577void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
578  AA = &aa;
579  GFI = gfi;
580  TD = DAG.getTarget().getTargetData();
581}
582
583/// clear - Clear out the current SelectionDAG and the associated
584/// state and prepare this SelectionDAGBuilder object to be used
585/// for a new block. This doesn't clear out information about
586/// additional blocks that are needed to complete switch lowering
587/// or PHI node updating; that information is cleared out as it is
588/// consumed.
589void SelectionDAGBuilder::clear() {
590  NodeMap.clear();
591  PendingLoads.clear();
592  PendingExports.clear();
593  EdgeMapping.clear();
594  DAG.clear();
595  CurDebugLoc = DebugLoc::getUnknownLoc();
596  HasTailCall = false;
597}
598
599/// getRoot - Return the current virtual root of the Selection DAG,
600/// flushing any PendingLoad items. This must be done before emitting
601/// a store or any other node that may need to be ordered after any
602/// prior load instructions.
603///
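/// For example, if two loads are pending when a store is visited, getRoot
/// token-factors both load chains together and makes that TokenFactor the
/// new root, so the store emitted afterwards is ordered after both loads.
///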
604SDValue SelectionDAGBuilder::getRoot() {
605  if (PendingLoads.empty())
606    return DAG.getRoot();
607
608  if (PendingLoads.size() == 1) {
609    SDValue Root = PendingLoads[0];
610    DAG.setRoot(Root);
611    PendingLoads.clear();
612    return Root;
613  }
614
615  // Otherwise, we have to make a token factor node.
616  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
617                               &PendingLoads[0], PendingLoads.size());
618  PendingLoads.clear();
619  DAG.setRoot(Root);
620  return Root;
621}
622
623/// getControlRoot - Similar to getRoot, but instead of flushing all the
624/// PendingLoad items, flush all the PendingExports items. It is necessary
625/// to do this before emitting a terminator instruction.
626///
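/// For example, CopyToReg nodes queued in PendingExports (values exported
/// for use in successor blocks) are token-factored into the root here, so a
/// conditional branch emitted afterwards depends on all of them.
///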
627SDValue SelectionDAGBuilder::getControlRoot() {
628  SDValue Root = DAG.getRoot();
629
630  if (PendingExports.empty())
631    return Root;
632
633  // Turn all of the CopyToReg chains into one factored node.
634  if (Root.getOpcode() != ISD::EntryToken) {
635    unsigned i = 0, e = PendingExports.size();
636    for (; i != e; ++i) {
637      assert(PendingExports[i].getNode()->getNumOperands() > 1);
638      if (PendingExports[i].getNode()->getOperand(0) == Root)
639        break;  // Don't add the root if we already indirectly depend on it.
640    }
641
642    if (i == e)
643      PendingExports.push_back(Root);
644  }
645
646  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
647                     &PendingExports[0],
648                     PendingExports.size());
649  PendingExports.clear();
650  DAG.setRoot(Root);
651  return Root;
652}
653
654void SelectionDAGBuilder::visit(Instruction &I) {
655  visit(I.getOpcode(), I);
656}
657
658void SelectionDAGBuilder::visit(unsigned Opcode, User &I) {
659  // We're processing a new instruction.
660  ++SDNodeOrder;
661
662  // Note: this doesn't use InstVisitor, because it has to work with
663  // ConstantExpr's in addition to instructions.
664  switch (Opcode) {
665  default: llvm_unreachable("Unknown instruction type encountered!");
666    // Build the switch statement using the Instruction.def file.
667#define HANDLE_INST(NUM, OPCODE, CLASS) \
668  case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
669#include "llvm/Instruction.def"
670  }
671}
672
673SDValue SelectionDAGBuilder::getValue(const Value *V) {
674  SDValue &N = NodeMap[V];
675  if (N.getNode()) return N;
676
677  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
678    EVT VT = TLI.getValueType(V->getType(), true);
679
680    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
681      return N = DAG.getConstant(*CI, VT);
682
683    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
684      return N = DAG.getGlobalAddress(GV, VT);
685
686    if (isa<ConstantPointerNull>(C))
687      return N = DAG.getConstant(0, TLI.getPointerTy());
688
689    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
690      return N = DAG.getConstantFP(*CFP, VT);
691
692    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
693      return N = DAG.getUNDEF(VT);
694
695    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
696      visit(CE->getOpcode(), *CE);
697      SDValue N1 = NodeMap[V];
698      assert(N1.getNode() && "visit didn't populate the ValueMap!");
699      return N1;
700    }
701
702    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
703      SmallVector<SDValue, 4> Constants;
704      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
705           OI != OE; ++OI) {
706        SDNode *Val = getValue(*OI).getNode();
707        // If the operand is an empty aggregate, there are no values.
708        if (!Val) continue;
709        // Add each leaf value from the operand to the Constants list
710        // to form a flattened list of all the values.
711        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
712          Constants.push_back(SDValue(Val, i));
713      }
714
715      SDValue Res = DAG.getMergeValues(&Constants[0], Constants.size(),
716                                       getCurDebugLoc());
717      if (DisableScheduling)
718        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
719      return Res;
720    }
721
722    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
723      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
724             "Unknown struct or array constant!");
725
726      SmallVector<EVT, 4> ValueVTs;
727      ComputeValueVTs(TLI, C->getType(), ValueVTs);
728      unsigned NumElts = ValueVTs.size();
729      if (NumElts == 0)
730        return SDValue(); // empty struct
731      SmallVector<SDValue, 4> Constants(NumElts);
732      for (unsigned i = 0; i != NumElts; ++i) {
733        EVT EltVT = ValueVTs[i];
734        if (isa<UndefValue>(C))
735          Constants[i] = DAG.getUNDEF(EltVT);
736        else if (EltVT.isFloatingPoint())
737          Constants[i] = DAG.getConstantFP(0, EltVT);
738        else
739          Constants[i] = DAG.getConstant(0, EltVT);
740      }
741
742      SDValue Res = DAG.getMergeValues(&Constants[0], NumElts,
743                                       getCurDebugLoc());
744      if (DisableScheduling)
745        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
746      return Res;
747    }
748
749    if (BlockAddress *BA = dyn_cast<BlockAddress>(C))
750      return DAG.getBlockAddress(BA, VT);
751
752    const VectorType *VecTy = cast<VectorType>(V->getType());
753    unsigned NumElements = VecTy->getNumElements();
754
755    // Now that we know the number and type of the elements, get that number of
756    // elements into the Ops array based on what kind of constant it is.
757    SmallVector<SDValue, 16> Ops;
758    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
759      for (unsigned i = 0; i != NumElements; ++i)
760        Ops.push_back(getValue(CP->getOperand(i)));
761    } else {
762      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
763      EVT EltVT = TLI.getValueType(VecTy->getElementType());
764
765      SDValue Op;
766      if (EltVT.isFloatingPoint())
767        Op = DAG.getConstantFP(0, EltVT);
768      else
769        Op = DAG.getConstant(0, EltVT);
770      Ops.assign(NumElements, Op);
771    }
772
773    // Create a BUILD_VECTOR node.
774    SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
775                              VT, &Ops[0], Ops.size());
776    if (DisableScheduling)
777      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
778
779    return NodeMap[V] = Res;
780  }
781
782  // If this is a static alloca, generate it as the frameindex instead of
783  // computation.
784  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
785    DenseMap<const AllocaInst*, int>::iterator SI =
786      FuncInfo.StaticAllocaMap.find(AI);
787    if (SI != FuncInfo.StaticAllocaMap.end())
788      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
789  }
790
791  unsigned InReg = FuncInfo.ValueMap[V];
792  assert(InReg && "Value not in map!");
793
794  RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
795  SDValue Chain = DAG.getEntryNode();
796  return RFV.getCopyFromRegs(DAG, getCurDebugLoc(),
797                             SDNodeOrder, Chain, NULL);
798}
799
800/// Get the EVTs and ArgFlags collections that represent the return type
801/// of the given function.  This does not require a DAG or a return value, and
802/// is suitable for use before any DAGs for the function are constructed.
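/// For example (assuming a function returning i64 with the zeroext attribute
/// on a 32-bit target), two i32 entries are appended to OutVTs, each with
/// the ZExt flag set in the corresponding OutFlags entry.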
803static void getReturnInfo(const Type* ReturnType,
804                   Attributes attr, SmallVectorImpl<EVT> &OutVTs,
805                   SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
806                   TargetLowering &TLI,
807                   SmallVectorImpl<uint64_t> *Offsets = 0) {
808  SmallVector<EVT, 4> ValueVTs;
809  ComputeValueVTs(TLI, ReturnType, ValueVTs, Offsets);
810  unsigned NumValues = ValueVTs.size();
811  if (NumValues == 0) return;
812
813  for (unsigned j = 0, f = NumValues; j != f; ++j) {
814    EVT VT = ValueVTs[j];
815    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
816
817    if (attr & Attribute::SExt)
818      ExtendKind = ISD::SIGN_EXTEND;
819    else if (attr & Attribute::ZExt)
820      ExtendKind = ISD::ZERO_EXTEND;
821
822    // FIXME: C calling convention requires the return type to be promoted to
823    // at least 32-bit. But this is not necessary for non-C calling
824    // conventions. The frontend should mark functions whose return values
825    // require promoting with signext or zeroext attributes.
826    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
827      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
828      if (VT.bitsLT(MinVT))
829        VT = MinVT;
830    }
831
832    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
833    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
834    // 'inreg' on function refers to return value
835    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
836    if (attr & Attribute::InReg)
837      Flags.setInReg();
838
839    // Propagate extension type if any
840    if (attr & Attribute::SExt)
841      Flags.setSExt();
842    else if (attr & Attribute::ZExt)
843      Flags.setZExt();
844
845    for (unsigned i = 0; i < NumParts; ++i) {
846      OutVTs.push_back(PartVT);
847      OutFlags.push_back(Flags);
848    }
849  }
850}
851
852void SelectionDAGBuilder::visitRet(ReturnInst &I) {
853  SDValue Chain = getControlRoot();
854  SmallVector<ISD::OutputArg, 8> Outs;
855  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
856
857  if (!FLI.CanLowerReturn) {
858    unsigned DemoteReg = FLI.DemoteRegister;
859    const Function *F = I.getParent()->getParent();
860
861    // Emit a store of the return value through the virtual register.
862    // Leave Outs empty so that LowerReturn won't try to load return
863    // registers the usual way.
864    SmallVector<EVT, 1> PtrValueVTs;
865    ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
866                    PtrValueVTs);
867
868    SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
869    SDValue RetOp = getValue(I.getOperand(0));
870
871    SmallVector<EVT, 4> ValueVTs;
872    SmallVector<uint64_t, 4> Offsets;
873    ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
874    unsigned NumValues = ValueVTs.size();
875
876    SmallVector<SDValue, 4> Chains(NumValues);
877    EVT PtrVT = PtrValueVTs[0];
878    for (unsigned i = 0; i != NumValues; ++i) {
879      SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, RetPtr,
880                                DAG.getConstant(Offsets[i], PtrVT));
881      Chains[i] =
882        DAG.getStore(Chain, getCurDebugLoc(),
883                     SDValue(RetOp.getNode(), RetOp.getResNo() + i),
884                     Add, NULL, Offsets[i], false, 0);
885
886      if (DisableScheduling) {
887        DAG.AssignOrdering(Add.getNode(), SDNodeOrder);
888        DAG.AssignOrdering(Chains[i].getNode(), SDNodeOrder);
889      }
890    }
891
892    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
893                        MVT::Other, &Chains[0], NumValues);
894
895    if (DisableScheduling)
896      DAG.AssignOrdering(Chain.getNode(), SDNodeOrder);
897  } else {
898    for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
899      SmallVector<EVT, 4> ValueVTs;
900      ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
901      unsigned NumValues = ValueVTs.size();
902      if (NumValues == 0) continue;
903
904      SDValue RetOp = getValue(I.getOperand(i));
905      for (unsigned j = 0, f = NumValues; j != f; ++j) {
906        EVT VT = ValueVTs[j];
907
908        ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
909
910        const Function *F = I.getParent()->getParent();
911        if (F->paramHasAttr(0, Attribute::SExt))
912          ExtendKind = ISD::SIGN_EXTEND;
913        else if (F->paramHasAttr(0, Attribute::ZExt))
914          ExtendKind = ISD::ZERO_EXTEND;
915
916        // FIXME: C calling convention requires the return type to be promoted to
917        // at least 32-bit. But this is not necessary for non-C calling
918        // conventions. The frontend should mark functions whose return values
919        // require promoting with signext or zeroext attributes.
920        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
921          EVT MinVT = TLI.getRegisterType(*DAG.getContext(), MVT::i32);
922          if (VT.bitsLT(MinVT))
923            VT = MinVT;
924        }
925
926        unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
927        EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
928        SmallVector<SDValue, 4> Parts(NumParts);
929        getCopyToParts(DAG, getCurDebugLoc(), SDNodeOrder,
930                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
931                       &Parts[0], NumParts, PartVT, ExtendKind);
932
933        // 'inreg' on function refers to return value
934        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
935        if (F->paramHasAttr(0, Attribute::InReg))
936          Flags.setInReg();
937
938        // Propagate extension type if any
939        if (F->paramHasAttr(0, Attribute::SExt))
940          Flags.setSExt();
941        else if (F->paramHasAttr(0, Attribute::ZExt))
942          Flags.setZExt();
943
944        for (unsigned i = 0; i < NumParts; ++i)
945          Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
946      }
947    }
948  }
949
950  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
951  CallingConv::ID CallConv =
952    DAG.getMachineFunction().getFunction()->getCallingConv();
953  Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
954                          Outs, getCurDebugLoc(), DAG);
955
956  // Verify that the target's LowerReturn behaved as expected.
957  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
958         "LowerReturn didn't return a valid chain!");
959
960  // Update the DAG with the new chain value resulting from return lowering.
961  DAG.setRoot(Chain);
962
963  if (DisableScheduling)
964    DAG.AssignOrdering(Chain.getNode(), SDNodeOrder);
965}
966
967/// CopyToExportRegsIfNeeded - If the given value has virtual registers
968/// created for it, emit nodes to copy the value into the virtual
969/// registers.
970void SelectionDAGBuilder::CopyToExportRegsIfNeeded(Value *V) {
971  if (!V->use_empty()) {
972    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
973    if (VMI != FuncInfo.ValueMap.end())
974      CopyValueToVirtualRegister(V, VMI->second);
975  }
976}
977
978/// ExportFromCurrentBlock - If this condition isn't known to be exported from
979/// the current basic block, add it to ValueMap now so that we'll get a
980/// CopyTo/FromReg.
981void SelectionDAGBuilder::ExportFromCurrentBlock(Value *V) {
982  // No need to export constants.
983  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
984
985  // Already exported?
986  if (FuncInfo.isExportedInst(V)) return;
987
988  unsigned Reg = FuncInfo.InitializeRegForValue(V);
989  CopyValueToVirtualRegister(V, Reg);
990}
991
992bool SelectionDAGBuilder::isExportableFromCurrentBlock(Value *V,
993                                                     const BasicBlock *FromBB) {
994  // The operands of the setcc have to be in this block.  We don't know
995  // how to export them from some other block.
996  if (Instruction *VI = dyn_cast<Instruction>(V)) {
997    // Can export from current BB.
998    if (VI->getParent() == FromBB)
999      return true;
1000
1001    // Is already exported, noop.
1002    return FuncInfo.isExportedInst(V);
1003  }
1004
1005  // If this is an argument, we can export it if the BB is the entry block or
1006  // if it is already exported.
1007  if (isa<Argument>(V)) {
1008    if (FromBB == &FromBB->getParent()->getEntryBlock())
1009      return true;
1010
1011    // Otherwise, can only export this if it is already exported.
1012    return FuncInfo.isExportedInst(V);
1013  }
1014
1015  // Otherwise, constants can always be exported.
1016  return true;
1017}
1018
1019static bool InBlock(const Value *V, const BasicBlock *BB) {
1020  if (const Instruction *I = dyn_cast<Instruction>(V))
1021    return I->getParent() == BB;
1022  return true;
1023}
1024
1025/// getFCmpCondCode - Return the ISD condition code corresponding to
1026/// the given LLVM IR floating-point condition code.  This includes
1027/// consideration of global floating-point math flags.
1028///
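/// For example, FCMP_OLT normally maps to the ordered SETOLT, but under
/// FiniteOnlyFPMath() (where NaN operands are assumed not to occur) the
/// plain SETLT form is returned instead.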
1029static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
1030  ISD::CondCode FPC, FOC;
1031  switch (Pred) {
1032  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
1033  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
1034  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
1035  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
1036  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
1037  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
1038  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
1039  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
1040  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
1041  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
1042  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
1043  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
1044  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
1045  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
1046  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
1047  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
1048  default:
1049    llvm_unreachable("Invalid FCmp predicate opcode!");
1050    FOC = FPC = ISD::SETFALSE;
1051    break;
1052  }
1053  if (FiniteOnlyFPMath())
1054    return FOC;
1055  else
1056    return FPC;
1057}
1058
1059/// getICmpCondCode - Return the ISD condition code corresponding to
1060/// the given LLVM IR integer condition code.
1061///
1062static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
1063  switch (Pred) {
1064  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
1065  case ICmpInst::ICMP_NE:  return ISD::SETNE;
1066  case ICmpInst::ICMP_SLE: return ISD::SETLE;
1067  case ICmpInst::ICMP_ULE: return ISD::SETULE;
1068  case ICmpInst::ICMP_SGE: return ISD::SETGE;
1069  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
1070  case ICmpInst::ICMP_SLT: return ISD::SETLT;
1071  case ICmpInst::ICMP_ULT: return ISD::SETULT;
1072  case ICmpInst::ICMP_SGT: return ISD::SETGT;
1073  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
1074  default:
1075    llvm_unreachable("Invalid ICmp predicate opcode!");
1076    return ISD::SETNE;
1077  }
1078}
1079
1080/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1081/// This function emits a branch and is used at the leaves of an OR or an
1082/// AND operator tree.
1083///
1084void
1085SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
1086                                                  MachineBasicBlock *TBB,
1087                                                  MachineBasicBlock *FBB,
1088                                                  MachineBasicBlock *CurBB) {
1089  const BasicBlock *BB = CurBB->getBasicBlock();
1090
1091  // If the leaf of the tree is a comparison, merge the condition into
1092  // the caseblock.
1093  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1094    // The operands of the cmp have to be in this block.  We don't know
1095    // how to export them from some other block.  If this is the first block
1096    // of the sequence, no exporting is needed.
1097    if (CurBB == CurMBB ||
1098        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1099         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1100      ISD::CondCode Condition;
1101      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1102        Condition = getICmpCondCode(IC->getPredicate());
1103      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1104        Condition = getFCmpCondCode(FC->getPredicate());
1105      } else {
1106        Condition = ISD::SETEQ; // silence warning.
1107        llvm_unreachable("Unknown compare instruction");
1108      }
1109
1110      CaseBlock CB(Condition, BOp->getOperand(0),
1111                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
1112      SwitchCases.push_back(CB);
1113      return;
1114    }
1115  }
1116
1117  // Create a CaseBlock record representing this branch.
1118  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
1119               NULL, TBB, FBB, CurBB);
1120  SwitchCases.push_back(CB);
1121}
1122
1123/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// recursively emit branches for its operands so that the condition is
/// evaluated with short-circuit control flow.
1124void SelectionDAGBuilder::FindMergedConditions(Value *Cond,
1125                                               MachineBasicBlock *TBB,
1126                                               MachineBasicBlock *FBB,
1127                                               MachineBasicBlock *CurBB,
1128                                               unsigned Opc) {
1129  // If this node is not part of the or/and tree, emit it as a branch.
1130  Instruction *BOp = dyn_cast<Instruction>(Cond);
1131  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1132      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1133      BOp->getParent() != CurBB->getBasicBlock() ||
1134      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1135      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1136    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
1137    return;
1138  }
1139
1140  //  Create TmpBB after CurBB.
1141  MachineFunction::iterator BBI = CurBB;
1142  MachineFunction &MF = DAG.getMachineFunction();
1143  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1144  CurBB->getParent()->insert(++BBI, TmpBB);
1145
1146  if (Opc == Instruction::Or) {
1147    // Codegen X | Y as:
1148    //   jmp_if_X TBB
1149    //   jmp TmpBB
1150    // TmpBB:
1151    //   jmp_if_Y TBB
1152    //   jmp FBB
1153    //
1154
1155    // Emit the LHS condition.
1156    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
1157
1158    // Emit the RHS condition into TmpBB.
1159    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1160  } else {
1161    assert(Opc == Instruction::And && "Unknown merge op!");
1162    // Codegen X & Y as:
1163    //   jmp_if_X TmpBB
1164    //   jmp FBB
1165    // TmpBB:
1166    //   jmp_if_Y TBB
1167    //   jmp FBB
1168    //
1169    //  This requires creation of TmpBB after CurBB.
1170
1171    // Emit the LHS condition.
1172    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
1173
1174    // Emit the RHS condition into TmpBB.
1175    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1176  }
1177}
1178
1179/// If the set of cases should be emitted as a series of branches, return true.
1180/// If we should emit this as a bunch of and/or'd together conditions, return
1181/// false.
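/// For example, "if (X < Y || X > Y)" produces two CaseBlocks comparing the
/// same two operands; since those compares will fold into a single setcc,
/// this returns false and the condition is emitted as combined setcc logic.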
1182bool
1183SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
1184  if (Cases.size() != 2) return true;
1185
1186  // If this is two comparisons of the same values or'd or and'd together, they
1187  // will get folded into a single comparison, so don't emit two blocks.
1188  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1189       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1190      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1191       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1192    return false;
1193  }
1194
1195  return true;
1196}
1197
1198void SelectionDAGBuilder::visitBr(BranchInst &I) {
1199  // Update machine-CFG edges.
1200  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1201
1202  // Figure out which block is immediately after the current one.
1203  MachineBasicBlock *NextBlock = 0;
1204  MachineFunction::iterator BBI = CurMBB;
1205  if (++BBI != FuncInfo.MF->end())
1206    NextBlock = BBI;
1207
1208  if (I.isUnconditional()) {
1209    // Update machine-CFG edges.
1210    CurMBB->addSuccessor(Succ0MBB);
1211
1212    // If this is not a fall-through branch, emit the branch.
1213    if (Succ0MBB != NextBlock) {
1214      SDValue V = DAG.getNode(ISD::BR, getCurDebugLoc(),
1215                              MVT::Other, getControlRoot(),
1216                              DAG.getBasicBlock(Succ0MBB));
1217      DAG.setRoot(V);
1218
1219      if (DisableScheduling)
1220        DAG.AssignOrdering(V.getNode(), SDNodeOrder);
1221    }
1222
1223    return;
1224  }
1225
1226  // If this condition is one of the special cases we handle, do special stuff
1227  // now.
1228  Value *CondVal = I.getCondition();
1229  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1230
1231  // If this is a series of conditions that are or'd or and'd together, emit
1232  // this as a sequence of branches instead of setcc's with and/or operations.
1233  // For example, instead of something like:
1234  //     cmp A, B
1235  //     C = seteq
1236  //     cmp D, E
1237  //     F = setle
1238  //     or C, F
1239  //     jnz foo
1240  // Emit:
1241  //     cmp A, B
1242  //     je foo
1243  //     cmp D, E
1244  //     jle foo
1245  //
1246  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1247    if (BOp->hasOneUse() &&
1248        (BOp->getOpcode() == Instruction::And ||
1249         BOp->getOpcode() == Instruction::Or)) {
1250      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1251      // If the compares in later blocks need to use values not currently
1252      // exported from this block, export them now.  This block should always
1253      // be the first entry.
1254      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1255
1256      // Allow some cases to be rejected.
1257      if (ShouldEmitAsBranches(SwitchCases)) {
1258        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1259          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1260          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1261        }
1262
1263        // Emit the branch for this block.
1264        visitSwitchCase(SwitchCases[0]);
1265        SwitchCases.erase(SwitchCases.begin());
1266        return;
1267      }
1268
1269      // Okay, we decided not to do this, remove any inserted MBB's and clear
1270      // SwitchCases.
1271      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1272        FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1273
1274      SwitchCases.clear();
1275    }
1276  }
1277
1278  // Create a CaseBlock record representing this branch.
1279  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1280               NULL, Succ0MBB, Succ1MBB, CurMBB);
1281
1282  // Use visitSwitchCase to actually insert the fast branch sequence for this
1283  // cond branch.
1284  visitSwitchCase(CB);
1285}
1286
1287/// visitSwitchCase - Emits the necessary code to represent a single node in
1288/// the binary search tree resulting from lowering a switch instruction.
1289void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) {
1290  SDValue Cond;
1291  SDValue CondLHS = getValue(CB.CmpLHS);
1292  DebugLoc dl = getCurDebugLoc();
1293
1294  // Build the setcc now.
1295  if (CB.CmpMHS == NULL) {
1296    // Fold "(X == true)" to X and "(X == false)" to !X to
1297    // handle common cases produced by branch lowering.
1298    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1299        CB.CC == ISD::SETEQ)
1300      Cond = CondLHS;
1301    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1302             CB.CC == ISD::SETEQ) {
1303      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1304      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1305    } else
1306      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1307  } else {
1308    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1309
1310    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1311    const APInt& High  = cast<ConstantInt>(CB.CmpRHS)->getValue();
1312
1313    SDValue CmpOp = getValue(CB.CmpMHS);
1314    EVT VT = CmpOp.getValueType();
1315
1316    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1317      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1318                          ISD::SETLE);
1319    } else {
1320      SDValue SUB = DAG.getNode(ISD::SUB, dl,
1321                                VT, CmpOp, DAG.getConstant(Low, VT));
1322      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1323                          DAG.getConstant(High-Low, VT), ISD::SETULE);
1324    }
1325  }
1326
1327  if (DisableScheduling)
1328    DAG.AssignOrdering(Cond.getNode(), SDNodeOrder);
1329
1330  // Update successor info
1331  CurMBB->addSuccessor(CB.TrueBB);
1332  CurMBB->addSuccessor(CB.FalseBB);
1333
1334  // Set NextBlock to be the MBB immediately after the current one, if any.
1335  // This is used to avoid emitting unnecessary branches to the next block.
1336  MachineBasicBlock *NextBlock = 0;
1337  MachineFunction::iterator BBI = CurMBB;
1338  if (++BBI != FuncInfo.MF->end())
1339    NextBlock = BBI;
1340
1341  // If the lhs block is the next block, invert the condition so that we can
1342  // fall through to the lhs instead of the rhs block.
1343  if (CB.TrueBB == NextBlock) {
1344    std::swap(CB.TrueBB, CB.FalseBB);
1345    SDValue True = DAG.getConstant(1, Cond.getValueType());
1346    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1347
1348    if (DisableScheduling)
1349      DAG.AssignOrdering(Cond.getNode(), SDNodeOrder);
1350  }
1351
1352  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1353                               MVT::Other, getControlRoot(), Cond,
1354                               DAG.getBasicBlock(CB.TrueBB));
1355
1356  if (DisableScheduling)
1357    DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1358
1359  // If the branch was constant folded, fix up the CFG.
1360  if (BrCond.getOpcode() == ISD::BR) {
1361    CurMBB->removeSuccessor(CB.FalseBB);
1362  } else {
1363    // Otherwise, go ahead and insert the false branch.
1364    if (BrCond == getControlRoot())
1365      CurMBB->removeSuccessor(CB.TrueBB);
1366
1367    if (CB.FalseBB != NextBlock) {
1368      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1369                           DAG.getBasicBlock(CB.FalseBB));
1370
1371      if (DisableScheduling)
1372        DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1373    }
1374  }
1375
1376  DAG.setRoot(BrCond);
1377}
1378
1379/// visitJumpTable - Emit JumpTable node in the current MBB
1380void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1381  // Emit the code for the jump table
1382  assert(JT.Reg != -1U && "Should lower JT Header first!");
1383  EVT PTy = TLI.getPointerTy();
1384  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
1385                                     JT.Reg, PTy);
1386  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1387  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
1388                                    MVT::Other, Index.getValue(1),
1389                                    Table, Index);
1390  DAG.setRoot(BrJumpTable);
1391
1392  if (DisableScheduling) {
1393    DAG.AssignOrdering(Index.getNode(), SDNodeOrder);
1394    DAG.AssignOrdering(Table.getNode(), SDNodeOrder);
1395    DAG.AssignOrdering(BrJumpTable.getNode(), SDNodeOrder);
1396  }
1397}
1398
1399/// visitJumpTableHeader - This function emits the code necessary to produce an
1400/// index into the jump table from the value being switched on.
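    /// For example, if the cases span the values 10..25, this emits (SValue - 10),
    /// copies the result into the register that indexes the jump table, and
    /// branches to the default block when the unsigned result exceeds 15.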
1401void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1402                                               JumpTableHeader &JTH) {
1403  // Subtract the lowest switch case value from the value being switched on and
1404  // conditionally branch to the default MBB if the result is greater than the
1405  // difference between the smallest and largest cases.
1406  SDValue SwitchOp = getValue(JTH.SValue);
1407  EVT VT = SwitchOp.getValueType();
1408  SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1409                            DAG.getConstant(JTH.First, VT));
1410
1411  // The SDNode we just created, which holds the value being switched on minus
1412  // the smallest case value, needs to be copied to a virtual register so it
1413  // can be used as an index into the jump table in a subsequent basic block.
1414  // This value may be smaller or larger than the target's pointer type, and
1415  // therefore requires extension or truncation.
1416  SwitchOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), TLI.getPointerTy());
1417
1418  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1419  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1420                                    JumpTableReg, SwitchOp);
1421  JT.Reg = JumpTableReg;
1422
1423  // Emit the range check for the jump table, and branch to the default block
1424  // for the switch statement if the value being switched on exceeds the largest
1425  // case in the switch.
1426  SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
1427                             TLI.getSetCCResultType(Sub.getValueType()), Sub,
1428                             DAG.getConstant(JTH.Last-JTH.First,VT),
1429                             ISD::SETUGT);
1430
1431  if (DisableScheduling) {
1432    DAG.AssignOrdering(Sub.getNode(), SDNodeOrder);
1433    DAG.AssignOrdering(SwitchOp.getNode(), SDNodeOrder);
1434    DAG.AssignOrdering(CopyTo.getNode(), SDNodeOrder);
1435    DAG.AssignOrdering(CMP.getNode(), SDNodeOrder);
1436  }
1437
1438  // Set NextBlock to be the MBB immediately after the current one, if any.
1439  // This is used to avoid emitting unnecessary branches to the next block.
1440  MachineBasicBlock *NextBlock = 0;
1441  MachineFunction::iterator BBI = CurMBB;
1442
1443  if (++BBI != FuncInfo.MF->end())
1444    NextBlock = BBI;
1445
1446  SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1447                               MVT::Other, CopyTo, CMP,
1448                               DAG.getBasicBlock(JT.Default));
1449
1450  if (DisableScheduling)
1451    DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1452
1453  if (JT.MBB != NextBlock) {
1454    BrCond = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
1455                         DAG.getBasicBlock(JT.MBB));
1456
1457    if (DisableScheduling)
1458      DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1459  }
1460
1461  DAG.setRoot(BrCond);
1462}
1463
1464/// visitBitTestHeader - This function emits the code necessary to produce a
1465/// value suitable for "bit tests".
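    /// The header subtracts B.First from the value being switched on, copies the
    /// result into B.Reg, and branches to B.Default when the result is (unsigned)
    /// greater than B.Range; otherwise control continues to the first bit-test
    /// block in B.Cases.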
1466void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) {
1467  // Subtract the minimum value
1468  SDValue SwitchOp = getValue(B.SValue);
1469  EVT VT = SwitchOp.getValueType();
1470  SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1471                            DAG.getConstant(B.First, VT));
1472
1473  // Check range
1474  SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
1475                                  TLI.getSetCCResultType(Sub.getValueType()),
1476                                  Sub, DAG.getConstant(B.Range, VT),
1477                                  ISD::SETUGT);
1478
1479  SDValue ShiftOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(),
1480                                       TLI.getPointerTy());
1481
1482  B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
1483  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1484                                    B.Reg, ShiftOp);
1485
1486  if (DisableScheduling) {
1487    DAG.AssignOrdering(Sub.getNode(), SDNodeOrder);
1488    DAG.AssignOrdering(RangeCmp.getNode(), SDNodeOrder);
1489    DAG.AssignOrdering(ShiftOp.getNode(), SDNodeOrder);
1490    DAG.AssignOrdering(CopyTo.getNode(), SDNodeOrder);
1491  }
1492
1493  // Set NextBlock to be the MBB immediately after the current one, if any.
1494  // This is used to avoid emitting unnecessary branches to the next block.
1495  MachineBasicBlock *NextBlock = 0;
1496  MachineFunction::iterator BBI = CurMBB;
1497  if (++BBI != FuncInfo.MF->end())
1498    NextBlock = BBI;
1499
1500  MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1501
1502  CurMBB->addSuccessor(B.Default);
1503  CurMBB->addSuccessor(MBB);
1504
1505  SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1506                                MVT::Other, CopyTo, RangeCmp,
1507                                DAG.getBasicBlock(B.Default));
1508
1509  if (DisableScheduling)
1510    DAG.AssignOrdering(BrRange.getNode(), SDNodeOrder);
1511
1512  if (MBB != NextBlock) {
1513    BrRange = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
1514                          DAG.getBasicBlock(MBB));
1515
1516    if (DisableScheduling)
1517      DAG.AssignOrdering(BrRange.getNode(), SDNodeOrder);
1518  }
1519
1520  DAG.setRoot(BrRange);
1521}
1522
1523/// visitBitTestCase - This function produces one "bit test".
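    /// The emitted test is ((1 << Reg) & B.Mask) != 0, where Reg holds the switch
    /// value minus the block's low bound (set up by visitBitTestHeader); on a
    /// non-zero result control branches to B.TargetBB, otherwise to NextMBB.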
1524void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
1525                                           unsigned Reg,
1526                                           BitTestCase &B) {
1527  // Make desired shift
1528  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
1529                                       TLI.getPointerTy());
1530  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
1531                                  TLI.getPointerTy(),
1532                                  DAG.getConstant(1, TLI.getPointerTy()),
1533                                  ShiftOp);
1534
1535  // Emit bit tests and jumps
1536  SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1537                              TLI.getPointerTy(), SwitchVal,
1538                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
1539  SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
1540                                TLI.getSetCCResultType(AndOp.getValueType()),
1541                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),
1542                                ISD::SETNE);
1543
1544  if (DisableScheduling) {
1545    DAG.AssignOrdering(ShiftOp.getNode(), SDNodeOrder);
1546    DAG.AssignOrdering(SwitchVal.getNode(), SDNodeOrder);
1547    DAG.AssignOrdering(AndOp.getNode(), SDNodeOrder);
1548    DAG.AssignOrdering(AndCmp.getNode(), SDNodeOrder);
1549  }
1550
1551  CurMBB->addSuccessor(B.TargetBB);
1552  CurMBB->addSuccessor(NextMBB);
1553
1554  SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1555                              MVT::Other, getControlRoot(),
1556                              AndCmp, DAG.getBasicBlock(B.TargetBB));
1557
1558  if (DisableScheduling)
1559    DAG.AssignOrdering(BrAnd.getNode(), SDNodeOrder);
1560
1561  // Set NextBlock to be the MBB immediately after the current one, if any.
1562  // This is used to avoid emitting unnecessary branches to the next block.
1563  MachineBasicBlock *NextBlock = 0;
1564  MachineFunction::iterator BBI = CurMBB;
1565  if (++BBI != FuncInfo.MF->end())
1566    NextBlock = BBI;
1567
1568  if (NextMBB != NextBlock) {
1569    BrAnd = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
1570                        DAG.getBasicBlock(NextMBB));
1571
1572    if (DisableScheduling)
1573      DAG.AssignOrdering(BrAnd.getNode(), SDNodeOrder);
1574  }
1575
1576  DAG.setRoot(BrAnd);
1577}
1578
1579void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
1580  // Retrieve successors.
1581  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1582  MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1583
1584  const Value *Callee(I.getCalledValue());
1585  if (isa<InlineAsm>(Callee))
1586    visitInlineAsm(&I);
1587  else
1588    LowerCallTo(&I, getValue(Callee), false, LandingPad);
1589
1590  // If the value of the invoke is used outside of its defining block, make it
1591  // available as a virtual register.
1592  CopyToExportRegsIfNeeded(&I);
1593
1594  // Update successor info
1595  CurMBB->addSuccessor(Return);
1596  CurMBB->addSuccessor(LandingPad);
1597
1598  // Drop into normal successor.
1599  SDValue Branch = DAG.getNode(ISD::BR, getCurDebugLoc(),
1600                               MVT::Other, getControlRoot(),
1601                               DAG.getBasicBlock(Return));
1602  DAG.setRoot(Branch);
1603
1604  if (DisableScheduling)
1605    DAG.AssignOrdering(Branch.getNode(), SDNodeOrder);
1606}
1607
1608void SelectionDAGBuilder::visitUnwind(UnwindInst &I) {
1609}
1610
1611/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
1612/// small case ranges).
1613bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
1614                                                 CaseRecVector& WorkList,
1615                                                 Value* SV,
1616                                                 MachineBasicBlock* Default) {
1617  Case& BackCase  = *(CR.Range.second-1);
1618
1619  // Size is the number of Cases represented by this range.
1620  size_t Size = CR.Range.second - CR.Range.first;
1621  if (Size > 3)
1622    return false;
1623
1624  // Get the MachineFunction which holds the current MBB.  This is used when
1625  // inserting any additional MBBs necessary to represent the switch.
1626  MachineFunction *CurMF = FuncInfo.MF;
1627
1628  // Figure out which block is immediately after the current one.
1629  MachineBasicBlock *NextBlock = 0;
1630  MachineFunction::iterator BBI = CR.CaseBB;
1631
1632  if (++BBI != FuncInfo.MF->end())
1633    NextBlock = BBI;
1634
1635  // TODO: If any two of the cases have the same destination, and if one value
1636  // is the same as the other, but has one bit unset that the other has set,
1637  // use bit manipulation to do two compares at once.  For example:
1638  // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1639
1640  // Rearrange the case blocks so that the last one falls through if possible.
1641  if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1642    // The last case block won't fall through into 'NextBlock' if we emit the
1643    // branches in this order.  See if rearranging a case value would help.
1644    for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1645      if (I->BB == NextBlock) {
1646        std::swap(*I, BackCase);
1647        break;
1648      }
1649    }
1650  }
1651
1652  // Create a CaseBlock record representing a conditional branch to
1653  // the Case's target mbb if the value being switched on, SV, is equal
1654  // to the Case's value.
1655  MachineBasicBlock *CurBlock = CR.CaseBB;
1656  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1657    MachineBasicBlock *FallThrough;
1658    if (I != E-1) {
1659      FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1660      CurMF->insert(BBI, FallThrough);
1661
1662      // Put SV in a virtual register to make it available from the new blocks.
1663      ExportFromCurrentBlock(SV);
1664    } else {
1665      // If the last case doesn't match, go to the default block.
1666      FallThrough = Default;
1667    }
1668
1669    Value *RHS, *LHS, *MHS;
1670    ISD::CondCode CC;
1671    if (I->High == I->Low) {
1672      // This is just a small case range :) containing exactly 1 case
1673      CC = ISD::SETEQ;
1674      LHS = SV; RHS = I->High; MHS = NULL;
1675    } else {
1676      CC = ISD::SETLE;
1677      LHS = I->Low; MHS = SV; RHS = I->High;
1678    }
1679    CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1680
1681    // If emitting the first comparison, just call visitSwitchCase to emit the
1682    // code into the current block.  Otherwise, push the CaseBlock onto the
1683    // vector to be later processed by SDISel, and insert the node's MBB
1684    // before the next MBB.
1685    if (CurBlock == CurMBB)
1686      visitSwitchCase(CB);
1687    else
1688      SwitchCases.push_back(CB);
1689
1690    CurBlock = FallThrough;
1691  }
1692
1693  return true;
1694}
1695
1696static inline bool areJTsAllowed(const TargetLowering &TLI) {
1697  return !DisableJumpTables &&
1698          (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1699           TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
1700}
1701
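    /// ComputeRange returns Last - First + 1, evaluated in a bit width one greater
    /// than that of the wider operand so the subtraction cannot overflow.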
1702static APInt ComputeRange(const APInt &First, const APInt &Last) {
1703  APInt LastExt(Last), FirstExt(First);
1704  uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1705  LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1706  return (LastExt - FirstExt + 1ULL);
1707}
1708
1709/// handleJTSwitchCase - Emit a jump table for the current switch case range.
1710bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
1711                                             CaseRecVector& WorkList,
1712                                             Value* SV,
1713                                             MachineBasicBlock* Default) {
1714  Case& FrontCase = *CR.Range.first;
1715  Case& BackCase  = *(CR.Range.second-1);
1716
1717  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1718  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1719
1720  APInt TSize(First.getBitWidth(), 0);
1721  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1722       I!=E; ++I)
1723    TSize += I->size();
1724
1725  if (!areJTsAllowed(TLI) || TSize.ult(APInt(First.getBitWidth(), 4)))
1726    return false;
1727
1728  APInt Range = ComputeRange(First, Last);
1729  double Density = TSize.roundToDouble() / Range.roundToDouble();
1730  if (Density < 0.4)
1731    return false;
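      // For example, 10 case values spread over a span of 40 give a density of
      // 0.25, so the range is left to the binary-tree lowering instead.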
1732
1733  DEBUG(errs() << "Lowering jump table\n"
1734               << "First entry: " << First << ". Last entry: " << Last << '\n'
1735               << "Range: " << Range
1736               << ". Size: " << TSize << ". Density: " << Density << "\n\n");
1737
1738  // Get the MachineFunction which holds the current MBB.  This is used when
1739  // inserting any additional MBBs necessary to represent the switch.
1740  MachineFunction *CurMF = FuncInfo.MF;
1741
1742  // Figure out which block is immediately after the current one.
1743  MachineFunction::iterator BBI = CR.CaseBB;
1744  ++BBI;
1745
1746  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1747
1748  // Create a new basic block to hold the code for loading the address
1749  // of the jump table, and jumping to it.  Update successor information;
1750  // we will either branch to the default case for the switch, or the jump
1751  // table.
1752  MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1753  CurMF->insert(BBI, JumpTableBB);
1754  CR.CaseBB->addSuccessor(Default);
1755  CR.CaseBB->addSuccessor(JumpTableBB);
1756
1757  // Build a vector of destination BBs, corresponding to each target
1758  // of the jump table. If the value of the jump table slot corresponds to
1759  // a case statement, push the case's BB onto the vector, otherwise, push
1760  // the default BB.
1761  std::vector<MachineBasicBlock*> DestBBs;
1762  APInt TEI = First;
1763  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1764    const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1765    const APInt& High = cast<ConstantInt>(I->High)->getValue();
1766
1767    if (Low.sle(TEI) && TEI.sle(High)) {
1768      DestBBs.push_back(I->BB);
1769      if (TEI==High)
1770        ++I;
1771    } else {
1772      DestBBs.push_back(Default);
1773    }
1774  }
1775
1776  // Update successor info. Add one edge to each unique successor.
1777  BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1778  for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1779         E = DestBBs.end(); I != E; ++I) {
1780    if (!SuccsHandled[(*I)->getNumber()]) {
1781      SuccsHandled[(*I)->getNumber()] = true;
1782      JumpTableBB->addSuccessor(*I);
1783    }
1784  }
1785
1786  // Create a jump table index for this jump table, or return an existing
1787  // one.
1788  unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1789
1790  // Set the jump table information so that we can codegen it as a second
1791  // MachineBasicBlock
1792  JumpTable JT(-1U, JTI, JumpTableBB, Default);
1793  JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1794  if (CR.CaseBB == CurMBB)
1795    visitJumpTableHeader(JT, JTH);
1796
1797  JTCases.push_back(JumpTableBlock(JTH, JT));
1798
1799  return true;
1800}
1801
1802/// handleBTSplitSwitchCase - Emit a comparison and split the binary search
1803/// tree into two subtrees.
1804bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
1805                                                  CaseRecVector& WorkList,
1806                                                  Value* SV,
1807                                                  MachineBasicBlock* Default) {
1808  // Get the MachineFunction which holds the current MBB.  This is used when
1809  // inserting any additional MBBs necessary to represent the switch.
1810  MachineFunction *CurMF = FuncInfo.MF;
1811
1812  // Figure out which block is immediately after the current one.
1813  MachineFunction::iterator BBI = CR.CaseBB;
1814  ++BBI;
1815
1816  Case& FrontCase = *CR.Range.first;
1817  Case& BackCase  = *(CR.Range.second-1);
1818  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1819
1820  // Size is the number of Cases represented by this range.
1821  unsigned Size = CR.Range.second - CR.Range.first;
1822
1823  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1824  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1825  double FMetric = 0;
1826  CaseItr Pivot = CR.Range.first + Size/2;
1827
1828  // Select optimal pivot, maximizing sum density of LHS and RHS. This will
1829  // (heuristically) allow us to emit jump tables later.
1830  APInt TSize(First.getBitWidth(), 0);
1831  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1832       I!=E; ++I)
1833    TSize += I->size();
1834
1835  APInt LSize = FrontCase.size();
1836  APInt RSize = TSize-LSize;
1837  DEBUG(errs() << "Selecting best pivot: \n"
1838               << "First: " << First << ", Last: " << Last <<'\n'
1839               << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1840  for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1841       J!=E; ++I, ++J) {
1842    const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
1843    const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
1844    APInt Range = ComputeRange(LEnd, RBegin);
1845    assert((Range - 2ULL).isNonNegative() &&
1846           "Invalid case distance");
1847    double LDensity = (double)LSize.roundToDouble() /
1848                           (LEnd - First + 1ULL).roundToDouble();
1849    double RDensity = (double)RSize.roundToDouble() /
1850                           (Last - RBegin + 1ULL).roundToDouble();
1851    double Metric = Range.logBase2()*(LDensity+RDensity);
1852    // Should always split in some non-trivial place
1853    DEBUG(errs() <<"=>Step\n"
1854                 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1855                 << "LDensity: " << LDensity
1856                 << ", RDensity: " << RDensity << '\n'
1857                 << "Metric: " << Metric << '\n');
1858    if (FMetric < Metric) {
1859      Pivot = J;
1860      FMetric = Metric;
1861      DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1862    }
1863
1864    LSize += J->size();
1865    RSize -= J->size();
1866  }
1867  if (areJTsAllowed(TLI)) {
1868    // If our case is dense we *really* should handle it earlier!
1869    assert((FMetric > 0) && "Should handle dense range earlier!");
1870  } else {
1871    Pivot = CR.Range.first + Size/2;
1872  }
1873
1874  CaseRange LHSR(CR.Range.first, Pivot);
1875  CaseRange RHSR(Pivot, CR.Range.second);
1876  Constant *C = Pivot->Low;
1877  MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1878
1879  // We know that we branch to the LHS if the Value being switched on is
1880  // less than the Pivot value, C.  We use this to optimize our binary
1881  // tree a bit, by recognizing that if SV is greater than or equal to the
1882  // LHS's Case Value, and that Case Value is exactly one less than the
1883  // Pivot's Value, then we can branch directly to the LHS's Target,
1884  // rather than creating a leaf node for it.
1885  if ((LHSR.second - LHSR.first) == 1 &&
1886      LHSR.first->High == CR.GE &&
1887      cast<ConstantInt>(C)->getValue() ==
1888      (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1889    TrueBB = LHSR.first->BB;
1890  } else {
1891    TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1892    CurMF->insert(BBI, TrueBB);
1893    WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1894
1895    // Put SV in a virtual register to make it available from the new blocks.
1896    ExportFromCurrentBlock(SV);
1897  }
1898
1899  // Similar to the optimization above, if the Value being switched on is
1900  // known to be less than the Constant CR.LT, and the current Case Value
1901  // is CR.LT - 1, then we can branch directly to the target block for
1902  // the current Case Value, rather than emitting a RHS leaf node for it.
1903  if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1904      cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1905      (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1906    FalseBB = RHSR.first->BB;
1907  } else {
1908    FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1909    CurMF->insert(BBI, FalseBB);
1910    WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1911
1912    // Put SV in a virtual register to make it available from the new blocks.
1913    ExportFromCurrentBlock(SV);
1914  }
1915
1916  // Create a CaseBlock record representing a conditional branch to
1917  // the LHS node if the value being switched on SV is less than C.
1918  // Otherwise, branch to RHS.
1919  CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1920
1921  if (CR.CaseBB == CurMBB)
1922    visitSwitchCase(CB);
1923  else
1924    SwitchCases.push_back(CB);
1925
1926  return true;
1927}
1928
1929/// handleBitTestsSwitchCase - If the current case range has few destinations
1930/// and spans less than the machine word bitwidth, encode the case range into a
1931/// series of masks and emit bit tests against these masks.
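    /// For example, a switch in which the values 1, 3 and 5 branch to one block
    /// and 2 and 4 branch to another (five compares, two destinations, a span of
    /// four) can be lowered to a single range check plus two mask tests.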
1932bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
1933                                                   CaseRecVector& WorkList,
1934                                                   Value* SV,
1935                                                   MachineBasicBlock* Default){
1936  EVT PTy = TLI.getPointerTy();
1937  unsigned IntPtrBits = PTy.getSizeInBits();
1938
1939  Case& FrontCase = *CR.Range.first;
1940  Case& BackCase  = *(CR.Range.second-1);
1941
1942  // Get the MachineFunction which holds the current MBB.  This is used when
1943  // inserting any additional MBBs necessary to represent the switch.
1944  MachineFunction *CurMF = FuncInfo.MF;
1945
1946  // If target does not have legal shift left, do not emit bit tests at all.
1947  if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1948    return false;
1949
1950  size_t numCmps = 0;
1951  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1952       I!=E; ++I) {
1953    // A single case counts as one comparison, a case range as two.
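        // For example, the single value 4 costs one comparison, while the range
        // [4, 7] costs two.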
1954    numCmps += (I->Low == I->High ? 1 : 2);
1955  }
1956
1957  // Count unique destinations
1958  SmallSet<MachineBasicBlock*, 4> Dests;
1959  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1960    Dests.insert(I->BB);
1961    if (Dests.size() > 3)
1962      // Don't bother with the code below if there are too many unique destinations
1963      return false;
1964  }
1965  DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1966               << "Total number of comparisons: " << numCmps << '\n');
1967
1968  // Compute span of values.
1969  const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1970  const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1971  APInt cmpRange = maxValue - minValue;
1972
1973  DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1974               << "Low bound: " << minValue << '\n'
1975               << "High bound: " << maxValue << '\n');
1976
1977  if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1978      (!(Dests.size() == 1 && numCmps >= 3) &&
1979       !(Dests.size() == 2 && numCmps >= 5) &&
1980       !(Dests.size() >= 3 && numCmps >= 6)))
1981    return false;
1982
1983  DEBUG(errs() << "Emitting bit tests\n");
1984  APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1985
1986  // Optimize the case where all the case values fit in a
1987  // word without having to subtract minValue. In this case,
1988  // we can optimize away the subtraction.
1989  if (minValue.isNonNegative() &&
1990      maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1991    cmpRange = maxValue;
1992  } else {
1993    lowBound = minValue;
1994  }
1995
1996  CaseBitsVector CasesBits;
1997  unsigned i, count = 0;
1998
1999  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
2000    MachineBasicBlock* Dest = I->BB;
2001    for (i = 0; i < count; ++i)
2002      if (Dest == CasesBits[i].BB)
2003        break;
2004
2005    if (i == count) {
2006      assert((count < 3) && "Too many destinations to test!");
2007      CasesBits.push_back(CaseBits(0, Dest, 0));
2008      count++;
2009    }
2010
2011    const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
2012    const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
2013
2014    uint64_t lo = (lowValue - lowBound).getZExtValue();
2015    uint64_t hi = (highValue - lowBound).getZExtValue();
2016
2017    for (uint64_t j = lo; j <= hi; j++) {
2018      CasesBits[i].Mask |=  1ULL << j;
2019      CasesBits[i].Bits++;
2020    }
2021
2022  }
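      // For example, with lowBound == 0, case values 1, 3 and 5 that share a
      // destination produce Mask == 0b101010 in that destination's CaseBits entry.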
2023  std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
2024
2025  BitTestInfo BTC;
2026
2027  // Figure out which block is immediately after the current one.
2028  MachineFunction::iterator BBI = CR.CaseBB;
2029  ++BBI;
2030
2031  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2032
2033  DEBUG(errs() << "Cases:\n");
2034  for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2035    DEBUG(errs() << "Mask: " << CasesBits[i].Mask
2036                 << ", Bits: " << CasesBits[i].Bits
2037                 << ", BB: " << CasesBits[i].BB << '\n');
2038
2039    MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2040    CurMF->insert(BBI, CaseBB);
2041    BTC.push_back(BitTestCase(CasesBits[i].Mask,
2042                              CaseBB,
2043                              CasesBits[i].BB));
2044
2045    // Put SV in a virtual register to make it available from the new blocks.
2046    ExportFromCurrentBlock(SV);
2047  }
2048
2049  BitTestBlock BTB(lowBound, cmpRange, SV,
2050                   -1U, (CR.CaseBB == CurMBB),
2051                   CR.CaseBB, Default, BTC);
2052
2053  if (CR.CaseBB == CurMBB)
2054    visitBitTestHeader(BTB);
2055
2056  BitTestCases.push_back(BTB);
2057
2058  return true;
2059}
2060
2061/// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
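    /// For example, the consecutive cases 1, 2 and 3 that all branch to the same
    /// block are merged into the single range [1, 3].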
2062size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
2063                                       const SwitchInst& SI) {
2064  size_t numCmps = 0;
2065
2066  // Start with "simple" cases
2067  for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
2068    MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
2069    Cases.push_back(Case(SI.getSuccessorValue(i),
2070                         SI.getSuccessorValue(i),
2071                         SMBB));
2072  }
2073  std::sort(Cases.begin(), Cases.end(), CaseCmp());
2074
2075  // Merge cases into clusters
2076  if (Cases.size() >= 2)
2077    // Must recompute end() each iteration because it may be
2078    // invalidated by erase if we hold on to it
2079    for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
2080      const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2081      const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2082      MachineBasicBlock* nextBB = J->BB;
2083      MachineBasicBlock* currentBB = I->BB;
2084
2085      // If the two neighboring cases go to the same destination, merge them
2086      // into a single case.
2087      if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2088        I->High = J->High;
2089        J = Cases.erase(J);
2090      } else {
2091        I = J++;
2092      }
2093    }
2094
2095  for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2096    if (I->Low != I->High)
2097      // A range counts double, since it requires two compares.
2098      ++numCmps;
2099  }
2100
2101  return numCmps;
2102}
2103
2104void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
2105  // Figure out which block is immediately after the current one.
2106  MachineBasicBlock *NextBlock = 0;
      MachineFunction::iterator BBI = CurMBB;
      if (++BBI != FuncInfo.MF->end())
        NextBlock = BBI;
2107  MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2108
2109  // If there is only the default destination, branch to it if it is not the
2110  // next basic block.  Otherwise, just fall through.
2111  if (SI.getNumOperands() == 2) {
2112    // Update machine-CFG edges.
2113
2114    // If this is not a fall-through branch, emit the branch.
2115    CurMBB->addSuccessor(Default);
2116    if (Default != NextBlock) {
2117      SDValue Res = DAG.getNode(ISD::BR, getCurDebugLoc(),
2118                                MVT::Other, getControlRoot(),
2119                                DAG.getBasicBlock(Default));
2120      DAG.setRoot(Res);
2121
2122      if (DisableScheduling)
2123        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2124    }
2125
2126    return;
2127  }
2128
2129  // If there are any non-default case statements, create a vector of Cases
2130  // representing each one, and sort the vector so that we can efficiently
2131  // create a binary search tree from them.
2132  CaseVector Cases;
2133  size_t numCmps = Clusterify(Cases, SI);
2134  DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2135               << ". Total compares: " << numCmps << '\n');
2136  numCmps = 0;
2137
2138  // Get the Value to be switched on and default basic blocks, which will be
2139  // inserted into CaseBlock records, representing basic blocks in the binary
2140  // search tree.
2141  Value *SV = SI.getOperand(0);
2142
2143  // Push the initial CaseRec onto the worklist
2144  CaseRecVector WorkList;
2145  WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2146
2147  while (!WorkList.empty()) {
2148    // Grab a record representing a case range to process off the worklist
2149    CaseRec CR = WorkList.back();
2150    WorkList.pop_back();
2151
2152    if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2153      continue;
2154
2155    // If the range has few cases (three or fewer), emit a series of specific
2156    // tests.
2157    if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2158      continue;
2159
2160    // If the switch covers at least 4 case values, is at least 40% dense, and the
2161    // target supports indirect branches, then emit a jump table rather than
2162    // lowering the switch to a binary tree of conditional branches.
2163    if (handleJTSwitchCase(CR, WorkList, SV, Default))
2164      continue;
2165
2166    // Emit binary tree. We need to pick a pivot, and push left and right ranges
2167    // onto the worklist. Leaves are handled via the handleSmallSwitchRange() call.
2168    handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2169  }
2170}
2171
2172void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) {
2173  // Update machine-CFG edges.
2174  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i)
2175    CurMBB->addSuccessor(FuncInfo.MBBMap[I.getSuccessor(i)]);
2176
2177  SDValue Res = DAG.getNode(ISD::BRIND, getCurDebugLoc(),
2178                            MVT::Other, getControlRoot(),
2179                            getValue(I.getAddress()));
2180  DAG.setRoot(Res);
2181
2182  if (DisableScheduling)
2183    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2184}
2185
2186void SelectionDAGBuilder::visitFSub(User &I) {
2187  // -0.0 - X --> fneg
2188  const Type *Ty = I.getType();
2189  if (isa<VectorType>(Ty)) {
2190    if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2191      const VectorType *DestTy = cast<VectorType>(I.getType());
2192      const Type *ElTy = DestTy->getElementType();
2193      unsigned VL = DestTy->getNumElements();
2194      std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2195      Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2196      if (CV == CNZ) {
2197        SDValue Op2 = getValue(I.getOperand(1));
2198        SDValue Res = DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2199                                  Op2.getValueType(), Op2);
2200        setValue(&I, Res);
2201
2202        if (DisableScheduling)
2203          DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2204
2205        return;
2206      }
2207    }
2208  }
2209
2210  if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2211    if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2212      SDValue Op2 = getValue(I.getOperand(1));
2213      SDValue Res = DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2214                                Op2.getValueType(), Op2);
2215      setValue(&I, Res);
2216
2217      if (DisableScheduling)
2218        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2219
2220      return;
2221    }
2222
2223  visitBinary(I, ISD::FSUB);
2224}
2225
2226void SelectionDAGBuilder::visitBinary(User &I, unsigned OpCode) {
2227  SDValue Op1 = getValue(I.getOperand(0));
2228  SDValue Op2 = getValue(I.getOperand(1));
2229  SDValue Res = DAG.getNode(OpCode, getCurDebugLoc(),
2230                            Op1.getValueType(), Op1, Op2);
2231  setValue(&I, Res);
2232
2233  if (DisableScheduling)
2234    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2235}
2236
2237void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) {
2238  SDValue Op1 = getValue(I.getOperand(0));
2239  SDValue Op2 = getValue(I.getOperand(1));
2240  if (!isa<VectorType>(I.getType()) &&
2241      Op2.getValueType() != TLI.getShiftAmountTy()) {
2242    // If the operand is smaller than the shift count type, promote it.
2243    EVT PTy = TLI.getPointerTy();
2244    EVT STy = TLI.getShiftAmountTy();
2245    if (STy.bitsGT(Op2.getValueType()))
2246      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2247                        TLI.getShiftAmountTy(), Op2);
2248    // If the operand is larger than the shift count type but the shift
2249    // count type has enough bits to represent any shift value, truncate
2250    // it now. This is a common case and it exposes the truncate to
2251    // optimization early.
2252    else if (STy.getSizeInBits() >=
2253             Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2254      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2255                        TLI.getShiftAmountTy(), Op2);
2256    // Otherwise we'll need to temporarily settle for some other
2257    // convenient type; type legalization will make adjustments as
2258    // needed.
2259    else if (PTy.bitsLT(Op2.getValueType()))
2260      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2261                        TLI.getPointerTy(), Op2);
2262    else if (PTy.bitsGT(Op2.getValueType()))
2263      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2264                        TLI.getPointerTy(), Op2);
2265  }
2266
2267  SDValue Res = DAG.getNode(Opcode, getCurDebugLoc(),
2268                            Op1.getValueType(), Op1, Op2);
2269  setValue(&I, Res);
2270
2271  if (DisableScheduling) {
2272    DAG.AssignOrdering(Op1.getNode(), SDNodeOrder);
2273    DAG.AssignOrdering(Op2.getNode(), SDNodeOrder);
2274    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2275  }
2276}
2277
2278void SelectionDAGBuilder::visitICmp(User &I) {
2279  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2280  if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2281    predicate = IC->getPredicate();
2282  else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2283    predicate = ICmpInst::Predicate(IC->getPredicate());
2284  SDValue Op1 = getValue(I.getOperand(0));
2285  SDValue Op2 = getValue(I.getOperand(1));
2286  ISD::CondCode Opcode = getICmpCondCode(predicate);
2287
2288  EVT DestVT = TLI.getValueType(I.getType());
2289  SDValue Res = DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode);
2290  setValue(&I, Res);
2291
2292  if (DisableScheduling)
2293    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2294}
2295
2296void SelectionDAGBuilder::visitFCmp(User &I) {
2297  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2298  if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2299    predicate = FC->getPredicate();
2300  else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2301    predicate = FCmpInst::Predicate(FC->getPredicate());
2302  SDValue Op1 = getValue(I.getOperand(0));
2303  SDValue Op2 = getValue(I.getOperand(1));
2304  ISD::CondCode Condition = getFCmpCondCode(predicate);
2305  EVT DestVT = TLI.getValueType(I.getType());
2306  SDValue Res = DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition);
2307  setValue(&I, Res);
2308
2309  if (DisableScheduling)
2310    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2311}
2312
2313void SelectionDAGBuilder::visitSelect(User &I) {
2314  SmallVector<EVT, 4> ValueVTs;
2315  ComputeValueVTs(TLI, I.getType(), ValueVTs);
2316  unsigned NumValues = ValueVTs.size();
2317  if (NumValues == 0) return;
2318
2319  SmallVector<SDValue, 4> Values(NumValues);
2320  SDValue Cond     = getValue(I.getOperand(0));
2321  SDValue TrueVal  = getValue(I.getOperand(1));
2322  SDValue FalseVal = getValue(I.getOperand(2));
2323
2324  for (unsigned i = 0; i != NumValues; ++i) {
2325    Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2326                            TrueVal.getNode()->getValueType(i), Cond,
2327                            SDValue(TrueVal.getNode(),
2328                                    TrueVal.getResNo() + i),
2329                            SDValue(FalseVal.getNode(),
2330                                    FalseVal.getResNo() + i));
2331
2332    if (DisableScheduling)
2333      DAG.AssignOrdering(Values[i].getNode(), SDNodeOrder);
2334  }
2335
2336  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2337                            DAG.getVTList(&ValueVTs[0], NumValues),
2338                            &Values[0], NumValues);
2339  setValue(&I, Res);
2340
2341  if (DisableScheduling)
2342    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2343}
2344
2345void SelectionDAGBuilder::visitTrunc(User &I) {
2346  // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2347  SDValue N = getValue(I.getOperand(0));
2348  EVT DestVT = TLI.getValueType(I.getType());
2349  SDValue Res = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2350  setValue(&I, Res);
2351
2352  if (DisableScheduling)
2353    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2354}
2355
2356void SelectionDAGBuilder::visitZExt(User &I) {
2357  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2358  // ZExt also can't be a cast to bool for the same reason, so there's not much to do
2359  SDValue N = getValue(I.getOperand(0));
2360  EVT DestVT = TLI.getValueType(I.getType());
2361  SDValue Res = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
2362  setValue(&I, Res);
2363
2364  if (DisableScheduling)
2365    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2366}
2367
2368void SelectionDAGBuilder::visitSExt(User &I) {
2369  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2370  // SExt also can't be a cast to bool for the same reason, so there's not much to do
2371  SDValue N = getValue(I.getOperand(0));
2372  EVT DestVT = TLI.getValueType(I.getType());
2373  SDValue Res = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N);
2374  setValue(&I, Res);
2375
2376  if (DisableScheduling)
2377    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2378}
2379
2380void SelectionDAGBuilder::visitFPTrunc(User &I) {
2381  // FPTrunc is never a no-op cast, no need to check
2382  SDValue N = getValue(I.getOperand(0));
2383  EVT DestVT = TLI.getValueType(I.getType());
2384  SDValue Res = DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2385                            DestVT, N, DAG.getIntPtrConstant(0));
2386  setValue(&I, Res);
2387
2388  if (DisableScheduling)
2389    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2390}
2391
2392void SelectionDAGBuilder::visitFPExt(User &I){
2393  // FPExt is never a no-op cast, no need to check
2394  SDValue N = getValue(I.getOperand(0));
2395  EVT DestVT = TLI.getValueType(I.getType());
2396  SDValue Res = DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N);
2397  setValue(&I, Res);
2398
2399  if (DisableScheduling)
2400    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2401}
2402
2403void SelectionDAGBuilder::visitFPToUI(User &I) {
2404  // FPToUI is never a no-op cast, no need to check
2405  SDValue N = getValue(I.getOperand(0));
2406  EVT DestVT = TLI.getValueType(I.getType());
2407  SDValue Res = DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N);
2408  setValue(&I, Res);
2409
2410  if (DisableScheduling)
2411    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2412}
2413
2414void SelectionDAGBuilder::visitFPToSI(User &I) {
2415  // FPToSI is never a no-op cast, no need to check
2416  SDValue N = getValue(I.getOperand(0));
2417  EVT DestVT = TLI.getValueType(I.getType());
2418  SDValue Res = DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N);
2419  setValue(&I, Res);
2420
2421  if (DisableScheduling)
2422    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2423}
2424
2425void SelectionDAGBuilder::visitUIToFP(User &I) {
2426  // UIToFP is never a no-op cast, no need to check
2427  SDValue N = getValue(I.getOperand(0));
2428  EVT DestVT = TLI.getValueType(I.getType());
2429  SDValue Res = DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N);
2430  setValue(&I, Res);
2431
2432  if (DisableScheduling)
2433    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2434}
2435
2436void SelectionDAGBuilder::visitSIToFP(User &I){
2437  // SIToFP is never a no-op cast, no need to check
2438  SDValue N = getValue(I.getOperand(0));
2439  EVT DestVT = TLI.getValueType(I.getType());
2440  SDValue Res = DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N);
2441  setValue(&I, Res);
2442
2443  if (DisableScheduling)
2444    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2445}
2446
2447void SelectionDAGBuilder::visitPtrToInt(User &I) {
2448  // What to do depends on the size of the integer and the size of the pointer.
2449  // We can either truncate, zero extend, or no-op, accordingly.
2450  SDValue N = getValue(I.getOperand(0));
2451  EVT SrcVT = N.getValueType();
2452  EVT DestVT = TLI.getValueType(I.getType());
2453  SDValue Res = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT);
2454  setValue(&I, Res);
2455
2456  if (DisableScheduling)
2457    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2458}
2459
2460void SelectionDAGBuilder::visitIntToPtr(User &I) {
2461  // What to do depends on the size of the integer and the size of the pointer.
2462  // We can either truncate, zero extend, or no-op, accordingly.
2463  SDValue N = getValue(I.getOperand(0));
2464  EVT SrcVT = N.getValueType();
2465  EVT DestVT = TLI.getValueType(I.getType());
2466  SDValue Res = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT);
2467  setValue(&I, Res);
2468
2469  if (DisableScheduling)
2470    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2471}
2472
2473void SelectionDAGBuilder::visitBitCast(User &I) {
2474  SDValue N = getValue(I.getOperand(0));
2475  EVT DestVT = TLI.getValueType(I.getType());
2476
2477  // BitCast assures us that source and destination are the same size, so this
2478  // is either a BIT_CONVERT or a no-op.
2479  if (DestVT != N.getValueType()) {
2480    SDValue Res = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2481                              DestVT, N); // convert types.
2482    setValue(&I, Res);
2483
2484    if (DisableScheduling)
2485      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2486  } else {
2487    setValue(&I, N);            // noop cast.
2488  }
2489}
2490
2491void SelectionDAGBuilder::visitInsertElement(User &I) {
2492  SDValue InVec = getValue(I.getOperand(0));
2493  SDValue InVal = getValue(I.getOperand(1));
2494  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2495                              TLI.getPointerTy(),
2496                              getValue(I.getOperand(2)));
2497  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2498                            TLI.getValueType(I.getType()),
2499                            InVec, InVal, InIdx);
2500  setValue(&I, Res);
2501
2502  if (DisableScheduling) {
2503    DAG.AssignOrdering(InIdx.getNode(), SDNodeOrder);
2504    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2505  }
2506}
2507
2508void SelectionDAGBuilder::visitExtractElement(User &I) {
2509  SDValue InVec = getValue(I.getOperand(0));
2510  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2511                              TLI.getPointerTy(),
2512                              getValue(I.getOperand(1)));
2513  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2514                            TLI.getValueType(I.getType()), InVec, InIdx);
2515  setValue(&I, Res);
2516
2517  if (DisableScheduling) {
2518    DAG.AssignOrdering(InIdx.getNode(), SDNodeOrder);
2519    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2520  }
2521}
2522
2523
2524// Utility for visitShuffleVector - Returns true if the mask is a sequential
2525// mask starting from SIndx, i.e. Mask[i] == i + SIndx (undefs are allowed).
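    // For example, <0, 1, -1, 3> is sequential starting from index 0 (the undef
    // entry is ignored), while <0, 2, 1, 3> is not.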
2526static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2527  unsigned MaskNumElts = Mask.size();
2528  for (unsigned i = 0; i != MaskNumElts; ++i)
2529    if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2530      return false;
2531  return true;
2532}
2533
2534void SelectionDAGBuilder::visitShuffleVector(User &I) {
2535  SmallVector<int, 8> Mask;
2536  SDValue Src1 = getValue(I.getOperand(0));
2537  SDValue Src2 = getValue(I.getOperand(1));
2538
2539  // Convert the ConstantVector mask operand into an array of ints, with -1
2540  // representing undef values.
2541  SmallVector<Constant*, 8> MaskElts;
2542  cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(),
2543                                                     MaskElts);
2544  unsigned MaskNumElts = MaskElts.size();
2545  for (unsigned i = 0; i != MaskNumElts; ++i) {
2546    if (isa<UndefValue>(MaskElts[i]))
2547      Mask.push_back(-1);
2548    else
2549      Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2550  }
2551
2552  EVT VT = TLI.getValueType(I.getType());
2553  EVT SrcVT = Src1.getValueType();
2554  unsigned SrcNumElts = SrcVT.getVectorNumElements();
2555
2556  if (SrcNumElts == MaskNumElts) {
2557    SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2558                                       &Mask[0]);
2559    setValue(&I, Res);
2560
2561    if (DisableScheduling)
2562      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2563
2564    return;
2565  }
2566
2567  // Normalize the shuffle vector since mask and vector length don't match.
2568  if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2569    // The mask is longer than the source vectors, and its length is a multiple
2570    // of the source vector length.  We can use CONCAT_VECTORS to make the mask
2571    // and vector lengths match.
2572    if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2573      // The shuffle is concatenating two vectors together.
2574      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2575                                VT, Src1, Src2);
2576      setValue(&I, Res);
2577
2578      if (DisableScheduling)
2579        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2580
2581      return;
2582    }
2583
2584    // Pad both vectors with undefs to make them the same length as the mask.
2585    unsigned NumConcat = MaskNumElts / SrcNumElts;
2586    bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2587    bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2588    SDValue UndefVal = DAG.getUNDEF(SrcVT);
2589
2590    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2591    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2592    MOps1[0] = Src1;
2593    MOps2[0] = Src2;
2594
2595    Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2596                                                  getCurDebugLoc(), VT,
2597                                                  &MOps1[0], NumConcat);
2598    Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2599                                                  getCurDebugLoc(), VT,
2600                                                  &MOps2[0], NumConcat);
2601
2602    // Readjust mask for new input vector length.
2603    SmallVector<int, 8> MappedOps;
2604    for (unsigned i = 0; i != MaskNumElts; ++i) {
2605      int Idx = Mask[i];
2606      if (Idx < (int)SrcNumElts)
2607        MappedOps.push_back(Idx);
2608      else
2609        MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2610    }
2611
2612    SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2613                                       &MappedOps[0]);
2614    setValue(&I, Res);
2615
2616    if (DisableScheduling) {
2617      DAG.AssignOrdering(Src1.getNode(), SDNodeOrder);
2618      DAG.AssignOrdering(Src2.getNode(), SDNodeOrder);
2619      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2620    }
2621
2622    return;
2623  }
2624
2625  if (SrcNumElts > MaskNumElts) {
2626    // Analyze the access pattern of the vector to see if we can extract
2627    // two subvectors and do the shuffle. The analysis is done by calculating
2628    // the range of elements the mask accesses on both vectors.
2629    int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2630    int MaxRange[2] = {-1, -1};
2631
2632    for (unsigned i = 0; i != MaskNumElts; ++i) {
2633      int Idx = Mask[i];
2634      int Input = 0;
2635      if (Idx < 0)
2636        continue;
2637
2638      if (Idx >= (int)SrcNumElts) {
2639        Input = 1;
2640        Idx -= SrcNumElts;
2641      }
2642      if (Idx > MaxRange[Input])
2643        MaxRange[Input] = Idx;
2644      if (Idx < MinRange[Input])
2645        MinRange[Input] = Idx;
2646    }
2647
2648    // Check whether the access is smaller than the vector size and whether we
2649    // can find a reasonable extract index.
2650    int RangeUse[2] = { 2, 2 };  // 0 = Unused, 1 = Extract, 2 = Cannot extract.
2651    int StartIdx[2];  // StartIdx to extract from
2652    for (int Input=0; Input < 2; ++Input) {
2653      if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2654        RangeUse[Input] = 0; // Unused
2655        StartIdx[Input] = 0;
2656      } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2657        // Fits within range but we should see if we can find a good
2658        // start index that is a multiple of the mask length.
2659        if (MaxRange[Input] < (int)MaskNumElts) {
2660          RangeUse[Input] = 1; // Extract from beginning of the vector
2661          StartIdx[Input] = 0;
2662        } else {
2663          StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2664          if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2665              StartIdx[Input] + MaskNumElts < SrcNumElts)
2666            RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2667        }
2668      }
2669    }
2670
2671    if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2672      SDValue Res = DAG.getUNDEF(VT);
2673      setValue(&I, Res);  // Vectors are not used.
2674
2675      if (DisableScheduling)
2676        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2677
2678      return;
2679    }
2680    else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2681      // Extract appropriate subvector and generate a vector shuffle
2682      for (int Input=0; Input < 2; ++Input) {
2683        SDValue &Src = Input == 0 ? Src1 : Src2;
2684        if (RangeUse[Input] == 0)
2685          Src = DAG.getUNDEF(VT);
2686        else
2687          Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2688                            Src, DAG.getIntPtrConstant(StartIdx[Input]));
2689
2690        if (DisableScheduling)
2691          DAG.AssignOrdering(Src.getNode(), SDNodeOrder);
2692      }
2693
2694      // Calculate new mask.
2695      SmallVector<int, 8> MappedOps;
2696      for (unsigned i = 0; i != MaskNumElts; ++i) {
2697        int Idx = Mask[i];
2698        if (Idx < 0)
2699          MappedOps.push_back(Idx);
2700        else if (Idx < (int)SrcNumElts)
2701          MappedOps.push_back(Idx - StartIdx[0]);
2702        else
2703          MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2704      }
2705
2706      SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2707                                         &MappedOps[0]);
2708      setValue(&I, Res);
2709
2710      if (DisableScheduling)
2711        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2712
2713      return;
2714    }
2715  }
2716
2717  // We can't use either concat vectors or extract subvectors, so fall back to
2718  // replacing the shuffle with a series of extract_vector_elt nodes and a
2719  // build_vector.
2720  EVT EltVT = VT.getVectorElementType();
2721  EVT PtrVT = TLI.getPointerTy();
2722  SmallVector<SDValue,8> Ops;
2723  for (unsigned i = 0; i != MaskNumElts; ++i) {
2724    if (Mask[i] < 0) {
2725      Ops.push_back(DAG.getUNDEF(EltVT));
2726    } else {
2727      int Idx = Mask[i];
2728      SDValue Res;
2729
2730      if (Idx < (int)SrcNumElts)
2731        Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2732                          EltVT, Src1, DAG.getConstant(Idx, PtrVT));
2733      else
2734        Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2735                          EltVT, Src2,
2736                          DAG.getConstant(Idx - SrcNumElts, PtrVT));
2737
2738      Ops.push_back(Res);
2739
2740      if (DisableScheduling)
2741        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2742    }
2743  }
2744
2745  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2746                            VT, &Ops[0], Ops.size());
2747  setValue(&I, Res);
2748
2749  if (DisableScheduling)
2750    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2751}
2752
2753void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) {
2754  const Value *Op0 = I.getOperand(0);
2755  const Value *Op1 = I.getOperand(1);
2756  const Type *AggTy = I.getType();
2757  const Type *ValTy = Op1->getType();
2758  bool IntoUndef = isa<UndefValue>(Op0);
2759  bool FromUndef = isa<UndefValue>(Op1);
2760
2761  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2762                                            I.idx_begin(), I.idx_end());
2763
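      // Illustrative example (not from the original source): for an aggregate
      // of type { i32, { float, i64 } }, ComputeValueVTs below flattens it
      // into the three leaf values [i32, float, i64].  An insertvalue with
      // indices (1, 1) therefore gets LinearIndex == 2, so the loops below
      // copy leaves 0 and 1 from the original aggregate and take leaf 2 from
      // the inserted value.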
2764  SmallVector<EVT, 4> AggValueVTs;
2765  ComputeValueVTs(TLI, AggTy, AggValueVTs);
2766  SmallVector<EVT, 4> ValValueVTs;
2767  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2768
2769  unsigned NumAggValues = AggValueVTs.size();
2770  unsigned NumValValues = ValValueVTs.size();
2771  SmallVector<SDValue, 4> Values(NumAggValues);
2772
2773  SDValue Agg = getValue(Op0);
2774  SDValue Val = getValue(Op1);
2775  unsigned i = 0;
2776  // Copy the beginning value(s) from the original aggregate.
2777  for (; i != LinearIndex; ++i)
2778    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2779                SDValue(Agg.getNode(), Agg.getResNo() + i);
2780  // Copy values from the inserted value(s).
2781  for (; i != LinearIndex + NumValValues; ++i)
2782    Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2783                SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2784  // Copy remaining value(s) from the original aggregate.
2785  for (; i != NumAggValues; ++i)
2786    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2787                SDValue(Agg.getNode(), Agg.getResNo() + i);
2788
2789  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2790                            DAG.getVTList(&AggValueVTs[0], NumAggValues),
2791                            &Values[0], NumAggValues);
2792  setValue(&I, Res);
2793
2794  if (DisableScheduling)
2795    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2796}
2797
2798void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) {
2799  const Value *Op0 = I.getOperand(0);
2800  const Type *AggTy = Op0->getType();
2801  const Type *ValTy = I.getType();
2802  bool OutOfUndef = isa<UndefValue>(Op0);
2803
2804  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2805                                            I.idx_begin(), I.idx_end());
2806
2807  SmallVector<EVT, 4> ValValueVTs;
2808  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2809
2810  unsigned NumValValues = ValValueVTs.size();
2811  SmallVector<SDValue, 4> Values(NumValValues);
2812
2813  SDValue Agg = getValue(Op0);
2814  // Copy out the selected value(s).
2815  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2816    Values[i - LinearIndex] =
2817      OutOfUndef ?
2818        DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2819        SDValue(Agg.getNode(), Agg.getResNo() + i);
2820
2821  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2822                            DAG.getVTList(&ValValueVTs[0], NumValValues),
2823                            &Values[0], NumValValues);
2824  setValue(&I, Res);
2825
2826  if (DisableScheduling)
2827    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2828}
2829
2830void SelectionDAGBuilder::visitGetElementPtr(User &I) {
2831  SDValue N = getValue(I.getOperand(0));
2832  const Type *Ty = I.getOperand(0)->getType();
2833
2834  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2835       OI != E; ++OI) {
2836    Value *Idx = *OI;
2837    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2838      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2839      if (Field) {
2840        // N = N + Offset
2841        uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2842        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2843                        DAG.getIntPtrConstant(Offset));
2844
2845        if (DisableScheduling)
2846          DAG.AssignOrdering(N.getNode(), SDNodeOrder);
2847      }
2848
2849      Ty = StTy->getElementType(Field);
2850    } else {
2851      Ty = cast<SequentialType>(Ty)->getElementType();
2852
2853      // If this is a constant subscript, handle it quickly.
2854      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2855        if (CI->getZExtValue() == 0) continue;
2856        uint64_t Offs =
2857            TD->getTypeAllocSize(Ty)*CI->getSExtValue();
2858        SDValue OffsVal;
2859        EVT PTy = TLI.getPointerTy();
2860        unsigned PtrBits = PTy.getSizeInBits();
2861        if (PtrBits < 64)
2862          OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2863                                TLI.getPointerTy(),
2864                                DAG.getConstant(Offs, MVT::i64));
2865        else
2866          OffsVal = DAG.getIntPtrConstant(Offs);
2867
2868        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2869                        OffsVal);
2870
2871        if (DisableScheduling) {
2872          DAG.AssignOrdering(OffsVal.getNode(), SDNodeOrder);
2873          DAG.AssignOrdering(N.getNode(), SDNodeOrder);
2874        }
2875
2876        continue;
2877      }
2878
2879      // N = N + Idx * ElementSize;
2880      APInt ElementSize = APInt(TLI.getPointerTy().getSizeInBits(),
2881                                TD->getTypeAllocSize(Ty));
2882      SDValue IdxN = getValue(Idx);
2883
2884      // If the index is narrower or wider than the pointer type, truncate
2885      // or sign-extend it to match.
2886      IdxN = DAG.getSExtOrTrunc(IdxN, getCurDebugLoc(), N.getValueType());
2887
2888      // If this is a multiply by a power of two, turn it into a shl
2889      // immediately.  This is a very common case.
2890      if (ElementSize != 1) {
2891        if (ElementSize.isPowerOf2()) {
2892          unsigned Amt = ElementSize.logBase2();
2893          IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2894                             N.getValueType(), IdxN,
2895                             DAG.getConstant(Amt, TLI.getPointerTy()));
2896        } else {
2897          SDValue Scale = DAG.getConstant(ElementSize, TLI.getPointerTy());
2898          IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2899                             N.getValueType(), IdxN, Scale);
2900        }
2901
2902        if (DisableScheduling)
2903          DAG.AssignOrdering(IdxN.getNode(), SDNodeOrder);
2904      }
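          // For example, indexing an array of i64 on a 64-bit target gives
          // ElementSize == 8, so the index is shifted left by
          // logBase2(8) == 3 instead of being multiplied.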
2905
2906      N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2907                      N.getValueType(), N, IdxN);
2908
2909      if (DisableScheduling)
2910        DAG.AssignOrdering(N.getNode(), SDNodeOrder);
2911    }
2912  }
2913
2914  setValue(&I, N);
2915}
2916
2917void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
2918  // If this is a fixed sized alloca in the entry block of the function,
2919  // allocate it statically on the stack.
2920  if (FuncInfo.StaticAllocaMap.count(&I))
2921    return;   // getValue will auto-populate this.
2922
2923  const Type *Ty = I.getAllocatedType();
2924  uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2925  unsigned Align =
2926    std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2927             I.getAlignment());
2928
2929  SDValue AllocSize = getValue(I.getArraySize());
2930
2931  AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2932                          AllocSize,
2933                          DAG.getConstant(TySize, AllocSize.getValueType()));
2934
2935  if (DisableScheduling)
2936    DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder);
2937
2938  EVT IntPtr = TLI.getPointerTy();
2939  AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
2940
2941  if (DisableScheduling)
2942    DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder);
2943
2944  // Handle alignment.  If the requested alignment is less than or equal to
2945  // the stack alignment, ignore it.  If it is greater than the stack
2946  // alignment, we record it in the DYNAMIC_STACKALLOC node.
2947  unsigned StackAlign =
2948    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2949  if (Align <= StackAlign)
2950    Align = 0;
2951
2952  // Round the size of the allocation up to the stack alignment size
2953  // by adding StackAlign-1 to the size.
2954  AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2955                          AllocSize.getValueType(), AllocSize,
2956                          DAG.getIntPtrConstant(StackAlign-1));
2957  if (DisableScheduling)
2958    DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder);
2959
2960  // Mask out the low bits for alignment purposes.
2961  AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2962                          AllocSize.getValueType(), AllocSize,
2963                          DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
2964  if (DisableScheduling)
2965    DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder);
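      // For example, with StackAlign == 16 a request of 40 bytes becomes
      // (40 + 15) & ~15 == 48: the size is rounded up to the next multiple of
      // the stack alignment.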
2966
2967  SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2968  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2969  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2970                            VTs, Ops, 3);
2971  setValue(&I, DSA);
2972  DAG.setRoot(DSA.getValue(1));
2973
2974  if (DisableScheduling)
2975    DAG.AssignOrdering(DSA.getNode(), SDNodeOrder);
2976
2977  // Inform the Frame Information that we have just allocated a variable-sized
2978  // object.
2979  FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
2980}
2981
2982void SelectionDAGBuilder::visitLoad(LoadInst &I) {
2983  const Value *SV = I.getOperand(0);
2984  SDValue Ptr = getValue(SV);
2985
2986  const Type *Ty = I.getType();
2987  bool isVolatile = I.isVolatile();
2988  unsigned Alignment = I.getAlignment();
2989
2990  SmallVector<EVT, 4> ValueVTs;
2991  SmallVector<uint64_t, 4> Offsets;
2992  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2993  unsigned NumValues = ValueVTs.size();
2994  if (NumValues == 0)
2995    return;
2996
2997  SDValue Root;
2998  bool ConstantMemory = false;
2999  if (I.isVolatile())
3000    // Serialize volatile loads with other side effects.
3001    Root = getRoot();
3002  else if (AA->pointsToConstantMemory(SV)) {
3003    // Do not serialize (non-volatile) loads of constant memory with anything.
3004    Root = DAG.getEntryNode();
3005    ConstantMemory = true;
3006  } else {
3007    // Do not serialize non-volatile loads against each other.
3008    Root = DAG.getRoot();
3009  }
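      // Roughly: non-volatile loads all start from the current DAG root and
      // their output chains are merely collected in PendingLoads below, so
      // they are not ordered against one another; a volatile load instead
      // chains after all pending side effects via getRoot().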
3010
3011  SmallVector<SDValue, 4> Values(NumValues);
3012  SmallVector<SDValue, 4> Chains(NumValues);
3013  EVT PtrVT = Ptr.getValueType();
3014  for (unsigned i = 0; i != NumValues; ++i) {
3015    SDValue A = DAG.getNode(ISD::ADD, getCurDebugLoc(),
3016                            PtrVT, Ptr,
3017                            DAG.getConstant(Offsets[i], PtrVT));
3018    SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
3019                            A, SV, Offsets[i], isVolatile, Alignment);
3020
3021    Values[i] = L;
3022    Chains[i] = L.getValue(1);
3023
3024    if (DisableScheduling) {
3025      DAG.AssignOrdering(A.getNode(), SDNodeOrder);
3026      DAG.AssignOrdering(L.getNode(), SDNodeOrder);
3027    }
3028  }
3029
3030  if (!ConstantMemory) {
3031    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
3032                                MVT::Other, &Chains[0], NumValues);
3033    if (isVolatile)
3034      DAG.setRoot(Chain);
3035    else
3036      PendingLoads.push_back(Chain);
3037
3038    if (DisableScheduling)
3039      DAG.AssignOrdering(Chain.getNode(), SDNodeOrder);
3040  }
3041
3042  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
3043                            DAG.getVTList(&ValueVTs[0], NumValues),
3044                            &Values[0], NumValues);
3045  setValue(&I, Res);
3046
3047  if (DisableScheduling)
3048    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
3049}
3050
3051void SelectionDAGBuilder::visitStore(StoreInst &I) {
3052  Value *SrcV = I.getOperand(0);
3053  Value *PtrV = I.getOperand(1);
3054
3055  SmallVector<EVT, 4> ValueVTs;
3056  SmallVector<uint64_t, 4> Offsets;
3057  ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
3058  unsigned NumValues = ValueVTs.size();
3059  if (NumValues == 0)
3060    return;
3061
3062  // Get the lowered operands. Note that we do this after
3063  // checking if NumValues is zero, because with zero values
3064  // the operands won't have values in the map.
3065  SDValue Src = getValue(SrcV);
3066  SDValue Ptr = getValue(PtrV);
3067
3068  SDValue Root = getRoot();
3069  SmallVector<SDValue, 4> Chains(NumValues);
3070  EVT PtrVT = Ptr.getValueType();
3071  bool isVolatile = I.isVolatile();
3072  unsigned Alignment = I.getAlignment();
3073
3074  for (unsigned i = 0; i != NumValues; ++i) {
3075    SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, Ptr,
3076                              DAG.getConstant(Offsets[i], PtrVT));
3077    Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
3078                             SDValue(Src.getNode(), Src.getResNo() + i),
3079                             Add, PtrV, Offsets[i], isVolatile, Alignment);
3080
3081    if (DisableScheduling) {
3082      DAG.AssignOrdering(Add.getNode(), SDNodeOrder);
3083      DAG.AssignOrdering(Chains[i].getNode(), SDNodeOrder);
3084    }
3085  }
3086
3087  SDValue Res = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
3088                            MVT::Other, &Chains[0], NumValues);
3089  DAG.setRoot(Res);
3090
3091  if (DisableScheduling)
3092    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
3093}
3094
3095/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
3096/// node.
3097void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I,
3098                                               unsigned Intrinsic) {
3099  bool HasChain = !I.doesNotAccessMemory();
3100  bool OnlyLoad = HasChain && I.onlyReadsMemory();
3101
3102  // Build the operand list.
3103  SmallVector<SDValue, 8> Ops;
3104  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
3105    if (OnlyLoad) {
3106      // We don't need to serialize loads against other loads.
3107      Ops.push_back(DAG.getRoot());
3108    } else {
3109      Ops.push_back(getRoot());
3110    }
3111  }
3112
3113  // Info is set by getTgtMemIntrinsic.
3114  TargetLowering::IntrinsicInfo Info;
3115  bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
3116
3117  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
3118  if (!IsTgtIntrinsic)
3119    Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
3120
3121  // Add all operands of the call to the operand list.
3122  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
3123    SDValue Op = getValue(I.getOperand(i));
3124    assert(TLI.isTypeLegal(Op.getValueType()) &&
3125           "Intrinsic uses a non-legal type?");
3126    Ops.push_back(Op);
3127  }
3128
3129  SmallVector<EVT, 4> ValueVTs;
3130  ComputeValueVTs(TLI, I.getType(), ValueVTs);
3131#ifndef NDEBUG
3132  for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
3133    assert(TLI.isTypeLegal(ValueVTs[Val]) &&
3134           "Intrinsic uses a non-legal type?");
3135  }
3136#endif // NDEBUG
3137
3138  if (HasChain)
3139    ValueVTs.push_back(MVT::Other);
3140
3141  SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
3142
3143  // Create the node.
3144  SDValue Result;
3145  if (IsTgtIntrinsic) {
3146    // This is a target intrinsic that touches memory.
3147    Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
3148                                     VTs, &Ops[0], Ops.size(),
3149                                     Info.memVT, Info.ptrVal, Info.offset,
3150                                     Info.align, Info.vol,
3151                                     Info.readMem, Info.writeMem);
3152  } else if (!HasChain) {
3153    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
3154                         VTs, &Ops[0], Ops.size());
3155  } else if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
3156    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
3157                         VTs, &Ops[0], Ops.size());
3158  } else {
3159    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
3160                         VTs, &Ops[0], Ops.size());
3161  }
3162
3163  if (DisableScheduling)
3164    DAG.AssignOrdering(Result.getNode(), SDNodeOrder);
3165
3166  if (HasChain) {
3167    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
3168    if (OnlyLoad)
3169      PendingLoads.push_back(Chain);
3170    else
3171      DAG.setRoot(Chain);
3172  }
3173
3174  if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
3175    if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
3176      EVT VT = TLI.getValueType(PTy);
3177      Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
3178
3179      if (DisableScheduling)
3180        DAG.AssignOrdering(Result.getNode(), SDNodeOrder);
3181    }
3182
3183    setValue(&I, Result);
3184  }
3185}
3186
3187/// GetSignificand - Get the significand and build it into a floating-point
3188/// number with exponent of 1:
3189///
3190///   Op = (Op & 0x007fffff) | 0x3f800000;
3191///
3192/// where Op is the integer bit pattern of the floating-point value.
3193static SDValue
3194GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl, unsigned Order) {
3195  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3196                           DAG.getConstant(0x007fffff, MVT::i32));
3197  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3198                           DAG.getConstant(0x3f800000, MVT::i32));
3199  SDValue Res = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
3200
3201  if (DisableScheduling) {
3202    DAG.AssignOrdering(t1.getNode(), Order);
3203    DAG.AssignOrdering(t2.getNode(), Order);
3204    DAG.AssignOrdering(Res.getNode(), Order);
3205  }
3206
3207  return Res;
3208}
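    // For example, for Op == 0x40400000 (the bits of 3.0f) the mask keeps the
    // mantissa 0x400000, and OR-ing in 0x3f800000 yields 0x3fc00000, i.e.
    // 1.5f: the significand of 3.0f rescaled into [1,2).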
3209
3210/// GetExponent - Get the exponent:
3211///
3212///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3213///
3214/// where Op is the integer bit pattern of the floating-point value.
3215static SDValue
3216GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3217            DebugLoc dl, unsigned Order) {
3218  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3219                           DAG.getConstant(0x7f800000, MVT::i32));
3220  SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3221                           DAG.getConstant(23, TLI.getPointerTy()));
3222  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3223                           DAG.getConstant(127, MVT::i32));
3224  SDValue Res = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
3225
3226  if (DisableScheduling) {
3227    DAG.AssignOrdering(t0.getNode(), Order);
3228    DAG.AssignOrdering(t1.getNode(), Order);
3229    DAG.AssignOrdering(t2.getNode(), Order);
3230    DAG.AssignOrdering(Res.getNode(), Order);
3231  }
3232
3233  return Res;
3234}
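    // For example, for Op == 0x41000000 (the bits of 8.0f) the biased
    // exponent field is 0x82 == 130, so the routine produces
    // (float)(130 - 127) == 3.0f.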
3235
3236/// getF32Constant - Get 32-bit floating point constant.
3237static SDValue
3238getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3239  return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
3240}
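    // For example, getF32Constant(DAG, 0x3f800000) yields 1.0f, and the
    // constant 0x3fb8aa3b used below encodes log2(e) ~= 1.4426950f.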
3241
3242/// implVisitBinaryAtomic - Utility used by visitIntrinsicCall to lower
3243/// binary-input atomic intrinsics: I is the call instruction and Op is the
3244/// ISD::NodeType to emit for it.
3245const char *
3246SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3247  SDValue Root = getRoot();
3248  SDValue L =
3249    DAG.getAtomic(Op, getCurDebugLoc(),
3250                  getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3251                  Root,
3252                  getValue(I.getOperand(1)),
3253                  getValue(I.getOperand(2)),
3254                  I.getOperand(1));
3255  setValue(&I, L);
3256  DAG.setRoot(L.getValue(1));
3257
3258  if (DisableScheduling)
3259    DAG.AssignOrdering(L.getNode(), SDNodeOrder);
3260
3261  return 0;
3262}
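    // For example, an llvm.atomic.load.add call is expected to be lowered
    // through this helper (presumably with ISD::ATOMIC_LOAD_ADD): operand 1
    // supplies the pointer, operand 2 the value to apply, and the value's
    // type gives the memory VT.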
3263
3264// implVisitAluOverflow - Lower arithmetic overflow intrinsics.
3265const char *
3266SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3267  SDValue Op1 = getValue(I.getOperand(1));
3268  SDValue Op2 = getValue(I.getOperand(2));
3269
3270  SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
3271  SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
3272
3273  setValue(&I, Result);
3274
3275  if (DisableScheduling)
3276    DAG.AssignOrdering(Result.getNode(), SDNodeOrder);
3277
3278  return 0;
3279}
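    // For example, llvm.sadd.with.overflow is expected to reach this helper
    // as ISD::SADDO; the node's first result is the arithmetic value and its
    // second is the i1 overflow flag, matching the VT list built above.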
3280
3281/// visitExp - Lower an exp intrinsic. Handles the special sequences for
3282/// limited-precision mode.
3283void
3284SelectionDAGBuilder::visitExp(CallInst &I) {
3285  SDValue result;
3286  DebugLoc dl = getCurDebugLoc();
3287
3288  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3289      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3290    SDValue Op = getValue(I.getOperand(1));
3291
3292    // Put the exponent in the right bit position for later addition to the
3293    // final result:
3294    //
3295    //   #define LOG2OFe 1.4426950f
3296    //   IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3297    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3298                             getF32Constant(DAG, 0x3fb8aa3b));
3299    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3300
3301    //   FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3302    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3303    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3304
3305    if (DisableScheduling) {
3306      DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3307      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
3308      DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3309      DAG.AssignOrdering(X.getNode(), SDNodeOrder);
3310    }
3311
3312    //   IntegerPartOfX <<= 23;
3313    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3314                                 DAG.getConstant(23, TLI.getPointerTy()));
3315
3316    if (DisableScheduling)
3317      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
3318
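        // Illustrative example (not from the original source): for an
        // argument of 1.0f the product with LOG2OFe is ~1.4427f, so
        // IntegerPartOfX == 1 and the fractional part X is ~0.4427f.  The
        // polynomial below approximates 2^0.4427 ~= 1.359, and adding
        // IntegerPartOfX << 23 into the bit pattern scales the result by 2^1,
        // giving ~2.718 == e^1.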
3319    if (LimitFloatPrecision <= 6) {
3320      // For floating-point precision of 6:
3321      //
3322      //   TwoToFractionalPartOfX =
3323      //     0.997535578f +
3324      //       (0.735607626f + 0.252464424f * x) * x;
3325      //
3326      // error 0.0144103317, which is 6 bits
3327      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3328                               getF32Constant(DAG, 0x3e814304));
3329      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3330                               getF32Constant(DAG, 0x3f3c50c8));
3331      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3332      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3333                               getF32Constant(DAG, 0x3f7f5e7e));
3334      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3335
3336      // Add the exponent into the result in integer domain.
3337      SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3338                               TwoToFracPartOfX, IntegerPartOfX);
3339
3340      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3341
3342      if (DisableScheduling) {
3343        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3344        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3345        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3346        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3347        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3348        DAG.AssignOrdering(TwoToFracPartOfX.getNode(), SDNodeOrder);
3349        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3350      }
3351    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3352      // For floating-point precision of 12:
3353      //
3354      //   TwoToFractionalPartOfX =
3355      //     0.999892986f +
3356      //       (0.696457318f +
3357      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3358      //
3359      // 0.000107046256 error, which is 13 to 14 bits
3360      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3361                               getF32Constant(DAG, 0x3da235e3));
3362      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3363                               getF32Constant(DAG, 0x3e65b8f3));
3364      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3365      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3366                               getF32Constant(DAG, 0x3f324b07));
3367      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3368      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3369                               getF32Constant(DAG, 0x3f7ff8fd));
3370      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3371
3372      // Add the exponent into the result in integer domain.
3373      SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3374                               TwoToFracPartOfX, IntegerPartOfX);
3375
3376      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3377
3378      if (DisableScheduling) {
3379        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3380        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3381        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3382        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3383        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3384        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3385        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3386        DAG.AssignOrdering(TwoToFracPartOfX.getNode(), SDNodeOrder);
3387        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3388      }
3389    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3390      // For floating-point precision of 18:
3391      //
3392      //   TwoToFractionalPartOfX =
3393      //     0.999999982f +
3394      //       (0.693148872f +
3395      //         (0.240227044f +
3396      //           (0.554906021e-1f +
3397      //             (0.961591928e-2f +
3398      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3399      //
3400      // error 2.47208000*10^(-7), which is better than 18 bits
3401      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3402                               getF32Constant(DAG, 0x3924b03e));
3403      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3404                               getF32Constant(DAG, 0x3ab24b87));
3405      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3406      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3407                               getF32Constant(DAG, 0x3c1d8c17));
3408      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3409      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3410                               getF32Constant(DAG, 0x3d634a1d));
3411      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3412      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3413                               getF32Constant(DAG, 0x3e75fe14));
3414      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3415      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3416                                getF32Constant(DAG, 0x3f317234));
3417      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3418      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3419                                getF32Constant(DAG, 0x3f800000));
3420      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3421                                             MVT::i32, t13);
3422
3423      // Add the exponent into the result in integer domain.
3424      SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3425                                TwoToFracPartOfX, IntegerPartOfX);
3426
3427      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3428
3429      if (DisableScheduling) {
3430        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3431        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3432        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3433        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3434        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3435        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3436        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3437        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
3438        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
3439        DAG.AssignOrdering(t11.getNode(), SDNodeOrder);
3440        DAG.AssignOrdering(t12.getNode(), SDNodeOrder);
3441        DAG.AssignOrdering(t13.getNode(), SDNodeOrder);
3442        DAG.AssignOrdering(t14.getNode(), SDNodeOrder);
3443        DAG.AssignOrdering(TwoToFracPartOfX.getNode(), SDNodeOrder);
3444        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3445      }
3446    }
3447  } else {
3448    // No special expansion.
3449    result = DAG.getNode(ISD::FEXP, dl,
3450                         getValue(I.getOperand(1)).getValueType(),
3451                         getValue(I.getOperand(1)));
3452    if (DisableScheduling)
3453      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3454  }
3455
3456  setValue(&I, result);
3457}
3458
3459/// visitLog - Lower a log intrinsic. Handles the special sequences for
3460/// limited-precision mode.
3461void
3462SelectionDAGBuilder::visitLog(CallInst &I) {
3463  SDValue result;
3464  DebugLoc dl = getCurDebugLoc();
3465
3466  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3467      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3468    SDValue Op = getValue(I.getOperand(1));
3469    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3470
3471    if (DisableScheduling)
3472      DAG.AssignOrdering(Op1.getNode(), SDNodeOrder);
3473
3474    // Scale the exponent by log(2) [0.69314718f].
3475    SDValue Exp = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder);
3476    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3477                                        getF32Constant(DAG, 0x3f317218));
3478
3479    if (DisableScheduling)
3480      DAG.AssignOrdering(LogOfExponent.getNode(), SDNodeOrder);
3481
3482    // Get the significand and build it into a floating-point number with
3483    // exponent of 1.
3484    SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder);
3485
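        // Illustrative example (not from the original source): for an input
        // of 8.0f, GetExponent returns 3.0f and GetSignificand returns 1.0f,
        // so LogOfExponent is 3 * 0.69314718f ~= 2.0794 and the polynomial
        // below contributes ~log(1.0) == 0, giving log(8.0f) ~= 2.0794.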
3486    if (LimitFloatPrecision <= 6) {
3487      // For floating-point precision of 6:
3488      //
3489      //   LogofMantissa =
3490      //     -1.1609546f +
3491      //       (1.4034025f - 0.23903021f * x) * x;
3492      //
3493      // error 0.0034276066, which is better than 8 bits
3494      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3495                               getF32Constant(DAG, 0xbe74c456));
3496      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3497                               getF32Constant(DAG, 0x3fb3a2b1));
3498      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3499      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3500                                          getF32Constant(DAG, 0x3f949a29));
3501
3502      result = DAG.getNode(ISD::FADD, dl,
3503                           MVT::f32, LogOfExponent, LogOfMantissa);
3504
3505      if (DisableScheduling) {
3506        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3507        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3508        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3509        DAG.AssignOrdering(LogOfMantissa.getNode(), SDNodeOrder);
3510        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3511      }
3512    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3513      // For floating-point precision of 12:
3514      //
3515      //   LogOfMantissa =
3516      //     -1.7417939f +
3517      //       (2.8212026f +
3518      //         (-1.4699568f +
3519      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3520      //
3521      // error 0.000061011436, which is 14 bits
3522      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3523                               getF32Constant(DAG, 0xbd67b6d6));
3524      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3525                               getF32Constant(DAG, 0x3ee4f4b8));
3526      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3527      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3528                               getF32Constant(DAG, 0x3fbc278b));
3529      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3530      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3531                               getF32Constant(DAG, 0x40348e95));
3532      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3533      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3534                                          getF32Constant(DAG, 0x3fdef31a));
3535
3536      result = DAG.getNode(ISD::FADD, dl,
3537                           MVT::f32, LogOfExponent, LogOfMantissa);
3538
3539      if (DisableScheduling) {
3540        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3541        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3542        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3543        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3544        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3545        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3546        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3547        DAG.AssignOrdering(LogOfMantissa.getNode(), SDNodeOrder);
3548        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3549      }
3550    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3551      // For floating-point precision of 18:
3552      //
3553      //   LogOfMantissa =
3554      //     -2.1072184f +
3555      //       (4.2372794f +
3556      //         (-3.7029485f +
3557      //           (2.2781945f +
3558      //             (-0.87823314f +
3559      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3560      //
3561      // error 0.0000023660568, which is better than 18 bits
3562      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3563                               getF32Constant(DAG, 0xbc91e5ac));
3564      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3565                               getF32Constant(DAG, 0x3e4350aa));
3566      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3567      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3568                               getF32Constant(DAG, 0x3f60d3e3));
3569      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3570      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3571                               getF32Constant(DAG, 0x4011cdf0));
3572      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3573      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3574                               getF32Constant(DAG, 0x406cfd1c));
3575      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3576      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3577                               getF32Constant(DAG, 0x408797cb));
3578      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3579      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3580                                          getF32Constant(DAG, 0x4006dcab));
3581
3582      result = DAG.getNode(ISD::FADD, dl,
3583                           MVT::f32, LogOfExponent, LogOfMantissa);
3584
3585      if (DisableScheduling) {
3586        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3587        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3588        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3589        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3590        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3591        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3592        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3593        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3594        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3595        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
3596        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
3597        DAG.AssignOrdering(LogOfMantissa.getNode(), SDNodeOrder);
3598        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3599      }
3600    }
3601  } else {
3602    // No special expansion.
3603    result = DAG.getNode(ISD::FLOG, dl,
3604                         getValue(I.getOperand(1)).getValueType(),
3605                         getValue(I.getOperand(1)));
3606
3607    if (DisableScheduling)
3608      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3609  }
3610
3611  setValue(&I, result);
3612}
3613
3614/// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3615/// limited-precision mode.
3616void
3617SelectionDAGBuilder::visitLog2(CallInst &I) {
3618  SDValue result;
3619  DebugLoc dl = getCurDebugLoc();
3620
3621  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3622      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3623    SDValue Op = getValue(I.getOperand(1));
3624    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3625
3626    if (DisableScheduling)
3627      DAG.AssignOrdering(Op1.getNode(), SDNodeOrder);
3628
3629    // Get the exponent.
3630    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder);
3631
3632    if (DisableScheduling)
3633      DAG.AssignOrdering(LogOfExponent.getNode(), SDNodeOrder);
3634
3635    // Get the significand and build it into a floating-point number with
3636    // exponent of 1.
3637    SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder);
3638
3639    // Different possible minimax approximations of the significand in
3640    // floating-point for various degrees of accuracy over [1,2].
3641    if (LimitFloatPrecision <= 6) {
3642      // For floating-point precision of 6:
3643      //
3644      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3645      //
3646      // error 0.0049451742, which is more than 7 bits
3647      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3648                               getF32Constant(DAG, 0xbeb08fe0));
3649      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3650                               getF32Constant(DAG, 0x40019463));
3651      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3652      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3653                                           getF32Constant(DAG, 0x3fd6633d));
3654
3655      result = DAG.getNode(ISD::FADD, dl,
3656                           MVT::f32, LogOfExponent, Log2ofMantissa);
3657
3658      if (DisableScheduling) {
3659        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3660        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3661        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3662        DAG.AssignOrdering(Log2ofMantissa.getNode(), SDNodeOrder);
3663        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3664      }
3665    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3666      // For floating-point precision of 12:
3667      //
3668      //   Log2ofMantissa =
3669      //     -2.51285454f +
3670      //       (4.07009056f +
3671      //         (-2.12067489f +
3672      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3673      //
3674      // error 0.0000876136000, which is better than 13 bits
3675      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3676                               getF32Constant(DAG, 0xbda7262e));
3677      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3678                               getF32Constant(DAG, 0x3f25280b));
3679      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3680      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3681                               getF32Constant(DAG, 0x4007b923));
3682      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3683      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3684                               getF32Constant(DAG, 0x40823e2f));
3685      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3686      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3687                                           getF32Constant(DAG, 0x4020d29c));
3688
3689      result = DAG.getNode(ISD::FADD, dl,
3690                           MVT::f32, LogOfExponent, Log2ofMantissa);
3691
3692      if (DisableScheduling) {
3693        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3694        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3695        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3696        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3697        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3698        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3699        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3700        DAG.AssignOrdering(Log2ofMantissa.getNode(), SDNodeOrder);
3701        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3702      }
3703    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3704      // For floating-point precision of 18:
3705      //
3706      //   Log2ofMantissa =
3707      //     -3.0400495f +
3708      //       (6.1129976f +
3709      //         (-5.3420409f +
3710      //           (3.2865683f +
3711      //             (-1.2669343f +
3712      //               (0.27515199f -
3713      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3714      //
3715      // error 0.0000018516, which is better than 18 bits
3716      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3717                               getF32Constant(DAG, 0xbcd2769e));
3718      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3719                               getF32Constant(DAG, 0x3e8ce0b9));
3720      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3721      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3722                               getF32Constant(DAG, 0x3fa22ae7));
3723      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3724      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3725                               getF32Constant(DAG, 0x40525723));
3726      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3727      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3728                               getF32Constant(DAG, 0x40aaf200));
3729      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3730      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3731                               getF32Constant(DAG, 0x40c39dad));
3732      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3733      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3734                                           getF32Constant(DAG, 0x4042902c));
3735
3736      result = DAG.getNode(ISD::FADD, dl,
3737                           MVT::f32, LogOfExponent, Log2ofMantissa);
3738
3739      if (DisableScheduling) {
3740        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3741        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3742        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3743        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3744        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3745        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3746        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3747        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3748        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3749        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
3750        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
3751        DAG.AssignOrdering(Log2ofMantissa.getNode(), SDNodeOrder);
3752        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3753      }
3754    }
3755  } else {
3756    // No special expansion.
3757    result = DAG.getNode(ISD::FLOG2, dl,
3758                         getValue(I.getOperand(1)).getValueType(),
3759                         getValue(I.getOperand(1)));
3760
3761    if (DisableScheduling)
3762      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3763  }
3764
3765  setValue(&I, result);
3766}
3767
3768/// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3769/// limited-precision mode.
3770void
3771SelectionDAGBuilder::visitLog10(CallInst &I) {
3772  SDValue result;
3773  DebugLoc dl = getCurDebugLoc();
3774
3775  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3776      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3777    SDValue Op = getValue(I.getOperand(1));
3778    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3779
3780    if (DisableScheduling)
3781      DAG.AssignOrdering(Op1.getNode(), SDNodeOrder);
3782
3783    // Scale the exponent by log10(2) [0.30102999f].
3784    SDValue Exp = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder);
3785    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3786                                        getF32Constant(DAG, 0x3e9a209a));
3787
3788    if (DisableScheduling)
3789      DAG.AssignOrdering(LogOfExponent.getNode(), SDNodeOrder);
3790
3791    // Get the significand and build it into a floating-point number with
3792    // exponent of 1.
3793    SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder);
3794
3795    if (LimitFloatPrecision <= 6) {
3796      // For floating-point precision of 6:
3797      //
3798      //   Log10ofMantissa =
3799      //     -0.50419619f +
3800      //       (0.60948995f - 0.10380950f * x) * x;
3801      //
3802      // error 0.0014886165, which is 6 bits
3803      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3804                               getF32Constant(DAG, 0xbdd49a13));
3805      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3806                               getF32Constant(DAG, 0x3f1c0789));
3807      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3808      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3809                                            getF32Constant(DAG, 0x3f011300));
3810
3811      result = DAG.getNode(ISD::FADD, dl,
3812                           MVT::f32, LogOfExponent, Log10ofMantissa);
3813
3814      if (DisableScheduling) {
3815        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3816        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3817        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3818        DAG.AssignOrdering(Log10ofMantissa.getNode(), SDNodeOrder);
3819        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3820      }
3821    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3822      // For floating-point precision of 12:
3823      //
3824      //   Log10ofMantissa =
3825      //     -0.64831180f +
3826      //       (0.91751397f +
3827      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3828      //
3829      // error 0.00019228036, which is better than 12 bits
3830      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3831                               getF32Constant(DAG, 0x3d431f31));
3832      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3833                               getF32Constant(DAG, 0x3ea21fb2));
3834      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3835      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3836                               getF32Constant(DAG, 0x3f6ae232));
3837      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3838      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3839                                            getF32Constant(DAG, 0x3f25f7c3));
3840
3841      result = DAG.getNode(ISD::FADD, dl,
3842                           MVT::f32, LogOfExponent, Log10ofMantissa);
3843
3844      if (DisableScheduling) {
3845        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3846        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3847        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3848        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3849        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3850        DAG.AssignOrdering(Log10ofMantissa.getNode(), SDNodeOrder);
3851        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3852      }
3853    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3854      // For floating-point precision of 18:
3855      //
3856      //   Log10ofMantissa =
3857      //     -0.84299375f +
3858      //       (1.5327582f +
3859      //         (-1.0688956f +
3860      //           (0.49102474f +
3861      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3862      //
3863      // error 0.0000037995730, which is better than 18 bits
3864      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3865                               getF32Constant(DAG, 0x3c5d51ce));
3866      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3867                               getF32Constant(DAG, 0x3e00685a));
3868      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3869      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3870                               getF32Constant(DAG, 0x3efb6798));
3871      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3872      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3873                               getF32Constant(DAG, 0x3f88d192));
3874      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3875      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3876                               getF32Constant(DAG, 0x3fc4316c));
3877      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3878      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3879                                            getF32Constant(DAG, 0x3f57ce70));
3880
3881      result = DAG.getNode(ISD::FADD, dl,
3882                           MVT::f32, LogOfExponent, Log10ofMantissa);
3883
3884      if (DisableScheduling) {
3885        DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
3886        DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3887        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3888        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3889        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3890        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3891        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3892        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
3893        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
3894        DAG.AssignOrdering(Log10ofMantissa.getNode(), SDNodeOrder);
3895        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3896      }
3897    }
3898  } else {
3899    // No special expansion.
3900    result = DAG.getNode(ISD::FLOG10, dl,
3901                         getValue(I.getOperand(1)).getValueType(),
3902                         getValue(I.getOperand(1)));
3903
3904    if (DisableScheduling)
3905      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3906  }
3907
3908  setValue(&I, result);
3909}
3910
3911/// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3912/// limited-precision mode.
3913void
3914SelectionDAGBuilder::visitExp2(CallInst &I) {
3915  SDValue result;
3916  DebugLoc dl = getCurDebugLoc();
3917
3918  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3919      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3920    SDValue Op = getValue(I.getOperand(1));
3921
3922    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3923
3924    if (DisableScheduling)
3925      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
3926
3927    //   FractionalPartOfX = x - (float)IntegerPartOfX;
3928    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3929    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3930
3931    //   IntegerPartOfX <<= 23;
3932    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3933                                 DAG.getConstant(23, TLI.getPointerTy()));
3934
3935    if (DisableScheduling) {
3936      DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
3937      DAG.AssignOrdering(X.getNode(), SDNodeOrder);
3938      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
3939    }
3940
3941    if (LimitFloatPrecision <= 6) {
3942      // For floating-point precision of 6:
3943      //
3944      //   TwoToFractionalPartOfX =
3945      //     0.997535578f +
3946      //       (0.735607626f + 0.252464424f * x) * x;
3947      //
3948      // error 0.0144103317, which is 6 bits
3949      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3950                               getF32Constant(DAG, 0x3e814304));
3951      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3952                               getF32Constant(DAG, 0x3f3c50c8));
3953      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3954      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3955                               getF32Constant(DAG, 0x3f7f5e7e));
3956      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3957      SDValue TwoToFractionalPartOfX =
3958        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3959
3960      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3961                           MVT::f32, TwoToFractionalPartOfX);
3962
3963      if (DisableScheduling) {
3964        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
3965        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
3966        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
3967        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
3968        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
3969        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
3970        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
3971      }
3972    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3973      // For floating-point precision of 12:
3974      //
3975      //   TwoToFractionalPartOfX =
3976      //     0.999892986f +
3977      //       (0.696457318f +
3978      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3979      //
3980      // error 0.000107046256, which is 13 to 14 bits
3981      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3982                               getF32Constant(DAG, 0x3da235e3));
3983      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3984                               getF32Constant(DAG, 0x3e65b8f3));
3985      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3986      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3987                               getF32Constant(DAG, 0x3f324b07));
3988      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3989      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3990                               getF32Constant(DAG, 0x3f7ff8fd));
3991      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3992      SDValue TwoToFractionalPartOfX =
3993        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3994
3995      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3996                           MVT::f32, TwoToFractionalPartOfX);
3997
3998      if (DisableScheduling) {
3999        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4000        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4001        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4002        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4003        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4004        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
4005        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
4006        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4007        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4008      }
4009    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
4010      // For floating-point precision of 18:
4011      //
4012      //   TwoToFractionalPartOfX =
4013      //     0.999999982f +
4014      //       (0.693148872f +
4015      //         (0.240227044f +
4016      //           (0.554906021e-1f +
4017      //             (0.961591928e-2f +
4018      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4019      // error 2.47208000*10^(-7), which is better than 18 bits
4020      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4021                               getF32Constant(DAG, 0x3924b03e));
4022      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4023                               getF32Constant(DAG, 0x3ab24b87));
4024      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4025      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4026                               getF32Constant(DAG, 0x3c1d8c17));
4027      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4028      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4029                               getF32Constant(DAG, 0x3d634a1d));
4030      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4031      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4032                               getF32Constant(DAG, 0x3e75fe14));
4033      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4034      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4035                                getF32Constant(DAG, 0x3f317234));
4036      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4037      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4038                                getF32Constant(DAG, 0x3f800000));
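      // 0x3f800000 is the bit pattern of 1.0f, i.e. the polynomial's
      // 0.999999982f constant term rounded to single precision.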
4039      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
4040      SDValue TwoToFractionalPartOfX =
4041        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
4042
4043      result = DAG.getNode(ISD::BIT_CONVERT, dl,
4044                           MVT::f32, TwoToFractionalPartOfX);
4045
4046      if (DisableScheduling) {
4047        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4048        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4049        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4050        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4051        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4052        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
4053        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
4054        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
4055        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
4056        DAG.AssignOrdering(t11.getNode(), SDNodeOrder);
4057        DAG.AssignOrdering(t12.getNode(), SDNodeOrder);
4058        DAG.AssignOrdering(t13.getNode(), SDNodeOrder);
4059        DAG.AssignOrdering(t14.getNode(), SDNodeOrder);
4060        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4061        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4062      }
4063    }
4064  } else {
4065    // No special expansion.
4066    result = DAG.getNode(ISD::FEXP2, dl,
4067                         getValue(I.getOperand(1)).getValueType(),
4068                         getValue(I.getOperand(1)));
4069
4070    if (DisableScheduling)
4071      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4072  }
4073
4074  setValue(&I, result);
4075}
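// Note on the expansion above: all three precision variants exploit the
// IEEE-754 single-precision layout.  Adding IntegerPartOfX << 23 to the bit
// pattern of 2^FractionalPartOfX increments the 8-bit exponent field by
// IntegerPartOfX, i.e. multiplies by 2^IntegerPartOfX (assuming the exponent
// does not overflow).  A rough scalar sketch of the same idea, where poly2f is
// a hypothetical stand-in for the FMUL/FADD chains emitted above (illustrative
// only; assumes <cstdint> and <cstring>); the same pattern appears again in
// visitPow below:
//
//   float exp2_approx(float x) {
//     int32_t n = (int32_t)x;          // IntegerPartOfX
//     float   f = x - (float)n;        // FractionalPartOfX
//     float   p = poly2f(f);           // polynomial approximation of 2^f
//     int32_t bits;
//     std::memcpy(&bits, &p, sizeof(bits));
//     bits += n << 23;                 // scale by 2^n via the exponent field
//     std::memcpy(&p, &bits, sizeof(bits));
//     return p;
//   }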
4076
4077/// visitPow - Lower a pow intrinsic. Handles the special sequences for
4078/// limited-precision mode when the base is 10.0f.
4079void
4080SelectionDAGBuilder::visitPow(CallInst &I) {
4081  SDValue result;
4082  Value *Val = I.getOperand(1);
4083  DebugLoc dl = getCurDebugLoc();
4084  bool IsExp10 = false;
4085
4086  if (getValue(Val).getValueType() == MVT::f32 &&
4087      getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
4088      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4089    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
4090      if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
4091        APFloat Ten(10.0f);
4092        IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
4093      }
4094    }
4095  }
4096
4097  if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4098    SDValue Op = getValue(I.getOperand(2));
4099
4100    // Put the exponent in the right bit position for later addition to the
4101    // final result:
4102    //
4103    //   #define LOG2OF10 3.3219281f
4104    //   IntegerPartOfX = (int32_t)(x * LOG2OF10);
4105    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4106                             getF32Constant(DAG, 0x40549a78));
4107    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4108
4109    //   FractionalPartOfX = x - (float)IntegerPartOfX;
4110    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4111    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
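    // X is t0 minus its truncated integer part, so the polynomial chains below
    // approximate 2^X and the final result is
    // 2^IntegerPartOfX * 2^X ~= 2^(x * log2(10)) = 10^x.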
4112
4113    if (DisableScheduling) {
4114      DAG.AssignOrdering(t0.getNode(), SDNodeOrder);
4115      DAG.AssignOrdering(t1.getNode(), SDNodeOrder);
4116      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
4117      DAG.AssignOrdering(X.getNode(), SDNodeOrder);
4118    }
4119
4120    //   IntegerPartOfX <<= 23;
4121    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4122                                 DAG.getConstant(23, TLI.getPointerTy()));
4123
4124    if (DisableScheduling)
4125      DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder);
4126
4127    if (LimitFloatPrecision <= 6) {
4128      // For floating-point precision of 6:
4129      //
4130      //   twoToFractionalPartOfX =
4131      //     0.997535578f +
4132      //       (0.735607626f + 0.252464424f * x) * x;
4133      //
4134      // error 0.0144103317, which is 6 bits
4135      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4136                               getF32Constant(DAG, 0x3e814304));
4137      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4138                               getF32Constant(DAG, 0x3f3c50c8));
4139      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4140      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4141                               getF32Constant(DAG, 0x3f7f5e7e));
4142      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
4143      SDValue TwoToFractionalPartOfX =
4144        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
4145
4146      result = DAG.getNode(ISD::BIT_CONVERT, dl,
4147                           MVT::f32, TwoToFractionalPartOfX);
4148
4149      if (DisableScheduling) {
4150        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4151        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4152        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4153        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4154        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4155        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4156        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4157      }
4158    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
4159      // For floating-point precision of 12:
4160      //
4161      //   TwoToFractionalPartOfX =
4162      //     0.999892986f +
4163      //       (0.696457318f +
4164      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4165      //
4166      // error 0.000107046256, which is 13 to 14 bits
4167      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4168                               getF32Constant(DAG, 0x3da235e3));
4169      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4170                               getF32Constant(DAG, 0x3e65b8f3));
4171      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4172      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4173                               getF32Constant(DAG, 0x3f324b07));
4174      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4175      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4176                               getF32Constant(DAG, 0x3f7ff8fd));
4177      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
4178      SDValue TwoToFractionalPartOfX =
4179        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
4180
4181      result = DAG.getNode(ISD::BIT_CONVERT, dl,
4182                           MVT::f32, TwoToFractionalPartOfX);
4183
4184      if (DisableScheduling) {
4185        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4186        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4187        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4188        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4189        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4190        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
4191        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
4192        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4193        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4194      }
4195    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
4196      // For floating-point precision of 18:
4197      //
4198      //   TwoToFractionalPartOfX =
4199      //     0.999999982f +
4200      //       (0.693148872f +
4201      //         (0.240227044f +
4202      //           (0.554906021e-1f +
4203      //             (0.961591928e-2f +
4204      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4205      // error 2.47208000*10^(-7), which is better than 18 bits
4206      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4207                               getF32Constant(DAG, 0x3924b03e));
4208      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4209                               getF32Constant(DAG, 0x3ab24b87));
4210      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4211      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4212                               getF32Constant(DAG, 0x3c1d8c17));
4213      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4214      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4215                               getF32Constant(DAG, 0x3d634a1d));
4216      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4217      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4218                               getF32Constant(DAG, 0x3e75fe14));
4219      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4220      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4221                                getF32Constant(DAG, 0x3f317234));
4222      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4223      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4224                                getF32Constant(DAG, 0x3f800000));
4225      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
4226      SDValue TwoToFractionalPartOfX =
4227        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
4228
4229      result = DAG.getNode(ISD::BIT_CONVERT, dl,
4230                           MVT::f32, TwoToFractionalPartOfX);
4231
4232      if (DisableScheduling) {
4233        DAG.AssignOrdering(t2.getNode(), SDNodeOrder);
4234        DAG.AssignOrdering(t3.getNode(), SDNodeOrder);
4235        DAG.AssignOrdering(t4.getNode(), SDNodeOrder);
4236        DAG.AssignOrdering(t5.getNode(), SDNodeOrder);
4237        DAG.AssignOrdering(t6.getNode(), SDNodeOrder);
4238        DAG.AssignOrdering(t7.getNode(), SDNodeOrder);
4239        DAG.AssignOrdering(t8.getNode(), SDNodeOrder);
4240        DAG.AssignOrdering(t9.getNode(), SDNodeOrder);
4241        DAG.AssignOrdering(t10.getNode(), SDNodeOrder);
4242        DAG.AssignOrdering(t11.getNode(), SDNodeOrder);
4243        DAG.AssignOrdering(t12.getNode(), SDNodeOrder);
4244        DAG.AssignOrdering(t13.getNode(), SDNodeOrder);
4245        DAG.AssignOrdering(t14.getNode(), SDNodeOrder);
4246        DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder);
4247        DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4248      }
4249    }
4250  } else {
4251    // No special expansion.
4252    result = DAG.getNode(ISD::FPOW, dl,
4253                         getValue(I.getOperand(1)).getValueType(),
4254                         getValue(I.getOperand(1)),
4255                         getValue(I.getOperand(2)));
4256
4257    if (DisableScheduling)
4258      DAG.AssignOrdering(result.getNode(), SDNodeOrder);
4259  }
4260
4261  setValue(&I, result);
4262}
4263
4264/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
4265/// we want to emit this as a call to a named external function, return the
4266/// name; otherwise, lower it and return null.
4267const char *
4268SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
4269  DebugLoc dl = getCurDebugLoc();
4270  SDValue Res;
4271
4272  switch (Intrinsic) {
4273  default:
4274    // By default, turn this into a target intrinsic node.
4275    visitTargetIntrinsic(I, Intrinsic);
4276    return 0;
4277  case Intrinsic::vastart:  visitVAStart(I); return 0;
4278  case Intrinsic::vaend:    visitVAEnd(I); return 0;
4279  case Intrinsic::vacopy:   visitVACopy(I); return 0;
4280  case Intrinsic::returnaddress:
4281    Res = DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
4282                      getValue(I.getOperand(1)));
4283    setValue(&I, Res);
4284    if (DisableScheduling)
4285      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4286    return 0;
4287  case Intrinsic::frameaddress:
4288    Res = DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
4289                      getValue(I.getOperand(1)));
4290    setValue(&I, Res);
4291    if (DisableScheduling)
4292      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4293    return 0;
4294  case Intrinsic::setjmp:
4295    return "_setjmp"+!TLI.usesUnderscoreSetJmp();
4296  case Intrinsic::longjmp:
4297    return "_longjmp"+!TLI.usesUnderscoreLongJmp();
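    // The pointer arithmetic above skips the leading '_' when the target does
    // not use the underscore-prefixed names, e.g. "_setjmp" + 1 == "setjmp".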
4298  case Intrinsic::memcpy: {
4299    SDValue Op1 = getValue(I.getOperand(1));
4300    SDValue Op2 = getValue(I.getOperand(2));
4301    SDValue Op3 = getValue(I.getOperand(3));
4302    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
4303    Res = DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
4304                        I.getOperand(1), 0, I.getOperand(2), 0);
4305    DAG.setRoot(Res);
4306    if (DisableScheduling)
4307      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4308    return 0;
4309  }
4310  case Intrinsic::memset: {
4311    SDValue Op1 = getValue(I.getOperand(1));
4312    SDValue Op2 = getValue(I.getOperand(2));
4313    SDValue Op3 = getValue(I.getOperand(3));
4314    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
4315    Res = DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
4316                        I.getOperand(1), 0);
4317    DAG.setRoot(Res);
4318    if (DisableScheduling)
4319      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4320    return 0;
4321  }
4322  case Intrinsic::memmove: {
4323    SDValue Op1 = getValue(I.getOperand(1));
4324    SDValue Op2 = getValue(I.getOperand(2));
4325    SDValue Op3 = getValue(I.getOperand(3));
4326    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
4327
4328    // If the source and destination are known to not be aliases, we can
4329    // lower memmove as memcpy.
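    // (memmove must tolerate overlapping operands; if alias analysis proves
    // NoAlias, no overlap is possible and the cheaper memcpy lowering is safe.)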
4330    uint64_t Size = -1ULL;
4331    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
4332      Size = C->getZExtValue();
4333    if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
4334        AliasAnalysis::NoAlias) {
4335      Res = DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
4336                          I.getOperand(1), 0, I.getOperand(2), 0);
4337      DAG.setRoot(Res);
4338      if (DisableScheduling)
4339        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4340      return 0;
4341    }
4342
4343    Res = DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
4344                         I.getOperand(1), 0, I.getOperand(2), 0);
4345    DAG.setRoot(Res);
4346    if (DisableScheduling)
4347      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4348    return 0;
4349  }
4350  case Intrinsic::dbg_stoppoint:
4351  case Intrinsic::dbg_region_start:
4352  case Intrinsic::dbg_region_end:
4353  case Intrinsic::dbg_func_start:
4354    // FIXME - Remove these intrinsics once the dust settles.
4355    return 0;
4356  case Intrinsic::dbg_declare: {
4357    if (OptLevel != CodeGenOpt::None)
4358      // FIXME: Variable debug info is not supported here.
4359      return 0;
4360    DwarfWriter *DW = DAG.getDwarfWriter();
4361    if (!DW)
4362      return 0;
4363    DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
4364    if (!DIDescriptor::ValidDebugInfo(DI.getVariable(), CodeGenOpt::None))
4365      return 0;
4366
4367    MDNode *Variable = DI.getVariable();
4368    Value *Address = DI.getAddress();
4369    if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
4370      Address = BCI->getOperand(0);
4371    AllocaInst *AI = dyn_cast<AllocaInst>(Address);
4372    // Don't handle byval struct arguments or VLAs, for example.
4373    if (!AI)
4374      return 0;
4375    DenseMap<const AllocaInst*, int>::iterator SI =
4376      FuncInfo.StaticAllocaMap.find(AI);
4377    if (SI == FuncInfo.StaticAllocaMap.end())
4378      return 0; // VLAs.
4379    int FI = SI->second;
4380
4381    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo())
4382      if (MDNode *Dbg = DI.getMetadata("dbg"))
4383        MMI->setVariableDbgInfo(Variable, FI, Dbg);
4384    return 0;
4385  }
4386  case Intrinsic::eh_exception: {
4387    // Insert the EXCEPTIONADDR instruction.
4388    assert(CurMBB->isLandingPad() &&"Call to eh.exception not in landing pad!");
4389    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
4390    SDValue Ops[1];
4391    Ops[0] = DAG.getRoot();
4392    SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
4393    setValue(&I, Op);
4394    DAG.setRoot(Op.getValue(1));
4395    if (DisableScheduling)
4396      DAG.AssignOrdering(Op.getNode(), SDNodeOrder);
4397    return 0;
4398  }
4399
4400  case Intrinsic::eh_selector: {
4401    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4402
4403    if (CurMBB->isLandingPad())
4404      AddCatchInfo(I, MMI, CurMBB);
4405    else {
4406#ifndef NDEBUG
4407      FuncInfo.CatchInfoLost.insert(&I);
4408#endif
4409      // FIXME: Mark exception selector register as live in.  Hack for PR1508.
4410      unsigned Reg = TLI.getExceptionSelectorRegister();
4411      if (Reg) CurMBB->addLiveIn(Reg);
4412    }
4413
4414    // Insert the EHSELECTION instruction.
4415    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
4416    SDValue Ops[2];
4417    Ops[0] = getValue(I.getOperand(1));
4418    Ops[1] = getRoot();
4419    SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
4420
4421    DAG.setRoot(Op.getValue(1));
4422
4423    Res = DAG.getSExtOrTrunc(Op, dl, MVT::i32);
4424    setValue(&I, Res);
4425    if (DisableScheduling) {
4426      DAG.AssignOrdering(Op.getNode(), SDNodeOrder);
4427      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4428    }
4429    return 0;
4430  }
4431
4432  case Intrinsic::eh_typeid_for: {
4433    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4434
4435    if (MMI) {
4436      // Find the type id for the given typeinfo.
4437      GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
4438      unsigned TypeID = MMI->getTypeIDFor(GV);
4439      Res = DAG.getConstant(TypeID, MVT::i32);
4440    } else {
4441      // Return something different from eh_selector's result.
4442      Res = DAG.getConstant(1, MVT::i32);
4443    }
4444
4445    setValue(&I, Res);
4446    if (DisableScheduling)
4447      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4448    return 0;
4449  }
4450
4451  case Intrinsic::eh_return_i32:
4452  case Intrinsic::eh_return_i64:
4453    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4454      MMI->setCallsEHReturn(true);
4455      Res = DAG.getNode(ISD::EH_RETURN, dl,
4456                        MVT::Other,
4457                        getControlRoot(),
4458                        getValue(I.getOperand(1)),
4459                        getValue(I.getOperand(2)));
4460      DAG.setRoot(Res);
4461      if (DisableScheduling)
4462        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4463    } else {
4464      setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
4465    }
4466
4467    return 0;
4468  case Intrinsic::eh_unwind_init:
4469    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4470      MMI->setCallsUnwindInit(true);
4471    }
4472    return 0;
4473  case Intrinsic::eh_dwarf_cfa: {
4474    EVT VT = getValue(I.getOperand(1)).getValueType();
4475    SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
4476                                        TLI.getPointerTy());
4477    SDValue Offset = DAG.getNode(ISD::ADD, dl,
4478                                 TLI.getPointerTy(),
4479                                 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
4480                                             TLI.getPointerTy()),
4481                                 CfaArg);
4482    SDValue FA = DAG.getNode(ISD::FRAMEADDR, dl,
4483                             TLI.getPointerTy(),
4484                             DAG.getConstant(0, TLI.getPointerTy()));
4485    Res = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
4486                      FA, Offset);
4487    setValue(&I, Res);
4488    if (DisableScheduling) {
4489      DAG.AssignOrdering(CfaArg.getNode(), SDNodeOrder);
4490      DAG.AssignOrdering(Offset.getNode(), SDNodeOrder);
4491      DAG.AssignOrdering(FA.getNode(), SDNodeOrder);
4492      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4493    }
4494    return 0;
4495  }
4496  case Intrinsic::convertff:
4497  case Intrinsic::convertfsi:
4498  case Intrinsic::convertfui:
4499  case Intrinsic::convertsif:
4500  case Intrinsic::convertuif:
4501  case Intrinsic::convertss:
4502  case Intrinsic::convertsu:
4503  case Intrinsic::convertus:
4504  case Intrinsic::convertuu: {
4505    ISD::CvtCode Code = ISD::CVT_INVALID;
4506    switch (Intrinsic) {
4507    case Intrinsic::convertff:  Code = ISD::CVT_FF; break;
4508    case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
4509    case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
4510    case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
4511    case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
4512    case Intrinsic::convertss:  Code = ISD::CVT_SS; break;
4513    case Intrinsic::convertsu:  Code = ISD::CVT_SU; break;
4514    case Intrinsic::convertus:  Code = ISD::CVT_US; break;
4515    case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
4516    }
4517    EVT DestVT = TLI.getValueType(I.getType());
4518    Value *Op1 = I.getOperand(1);
4519    Res = DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
4520                               DAG.getValueType(DestVT),
4521                               DAG.getValueType(getValue(Op1).getValueType()),
4522                               getValue(I.getOperand(2)),
4523                               getValue(I.getOperand(3)),
4524                               Code);
4525    setValue(&I, Res);
4526    if (DisableScheduling)
4527      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4528    return 0;
4529  }
4530  case Intrinsic::sqrt:
4531    Res = DAG.getNode(ISD::FSQRT, dl,
4532                      getValue(I.getOperand(1)).getValueType(),
4533                      getValue(I.getOperand(1)));
4534    setValue(&I, Res);
4535    if (DisableScheduling)
4536      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4537    return 0;
4538  case Intrinsic::powi:
4539    Res = DAG.getNode(ISD::FPOWI, dl,
4540                      getValue(I.getOperand(1)).getValueType(),
4541                      getValue(I.getOperand(1)),
4542                      getValue(I.getOperand(2)));
4543    setValue(&I, Res);
4544    if (DisableScheduling)
4545      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4546    return 0;
4547  case Intrinsic::sin:
4548    Res = DAG.getNode(ISD::FSIN, dl,
4549                      getValue(I.getOperand(1)).getValueType(),
4550                      getValue(I.getOperand(1)));
4551    setValue(&I, Res);
4552    if (DisableScheduling)
4553      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4554    return 0;
4555  case Intrinsic::cos:
4556    Res = DAG.getNode(ISD::FCOS, dl,
4557                      getValue(I.getOperand(1)).getValueType(),
4558                      getValue(I.getOperand(1)));
4559    setValue(&I, Res);
4560    if (DisableScheduling)
4561      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4562    return 0;
4563  case Intrinsic::log:
4564    visitLog(I);
4565    return 0;
4566  case Intrinsic::log2:
4567    visitLog2(I);
4568    return 0;
4569  case Intrinsic::log10:
4570    visitLog10(I);
4571    return 0;
4572  case Intrinsic::exp:
4573    visitExp(I);
4574    return 0;
4575  case Intrinsic::exp2:
4576    visitExp2(I);
4577    return 0;
4578  case Intrinsic::pow:
4579    visitPow(I);
4580    return 0;
4581  case Intrinsic::pcmarker: {
4582    SDValue Tmp = getValue(I.getOperand(1));
4583    Res = DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp);
4584    DAG.setRoot(Res);
4585    if (DisableScheduling)
4586      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4587    return 0;
4588  }
4589  case Intrinsic::readcyclecounter: {
4590    SDValue Op = getRoot();
4591    Res = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4592                      DAG.getVTList(MVT::i64, MVT::Other),
4593                      &Op, 1);
4594    setValue(&I, Res);
4595    DAG.setRoot(Res.getValue(1));
4596    if (DisableScheduling)
4597      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4598    return 0;
4599  }
4600  case Intrinsic::bswap:
4601    Res = DAG.getNode(ISD::BSWAP, dl,
4602                      getValue(I.getOperand(1)).getValueType(),
4603                      getValue(I.getOperand(1)));
4604    setValue(&I, Res);
4605    if (DisableScheduling)
4606      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4607    return 0;
4608  case Intrinsic::cttz: {
4609    SDValue Arg = getValue(I.getOperand(1));
4610    EVT Ty = Arg.getValueType();
4611    Res = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4612    setValue(&I, Res);
4613    if (DisableScheduling)
4614      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4615    return 0;
4616  }
4617  case Intrinsic::ctlz: {
4618    SDValue Arg = getValue(I.getOperand(1));
4619    EVT Ty = Arg.getValueType();
4620    Res = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4621    setValue(&I, Res);
4622    if (DisableScheduling)
4623      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4624    return 0;
4625  }
4626  case Intrinsic::ctpop: {
4627    SDValue Arg = getValue(I.getOperand(1));
4628    EVT Ty = Arg.getValueType();
4629    Res = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4630    setValue(&I, Res);
4631    if (DisableScheduling)
4632      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4633    return 0;
4634  }
4635  case Intrinsic::stacksave: {
4636    SDValue Op = getRoot();
4637    Res = DAG.getNode(ISD::STACKSAVE, dl,
4638                      DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4639    setValue(&I, Res);
4640    DAG.setRoot(Res.getValue(1));
4641    if (DisableScheduling)
4642      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4643    return 0;
4644  }
4645  case Intrinsic::stackrestore: {
4646    Res = getValue(I.getOperand(1));
4647    Res = DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Res);
4648    DAG.setRoot(Res);
4649    if (DisableScheduling)
4650      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4651    return 0;
4652  }
4653  case Intrinsic::stackprotector: {
4654    // Emit code into the DAG to store the stack guard onto the stack.
4655    MachineFunction &MF = DAG.getMachineFunction();
4656    MachineFrameInfo *MFI = MF.getFrameInfo();
4657    EVT PtrTy = TLI.getPointerTy();
4658
4659    SDValue Src = getValue(I.getOperand(1));   // The guard's value.
4660    AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4661
4662    int FI = FuncInfo.StaticAllocaMap[Slot];
4663    MFI->setStackProtectorIndex(FI);
4664
4665    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4666
4667    // Store the stack protector onto the stack.
4668    Res = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4669                       PseudoSourceValue::getFixedStack(FI),
4670                       0, true);
4671    setValue(&I, Res);
4672    DAG.setRoot(Res);
4673    if (DisableScheduling)
4674      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4675    return 0;
4676  }
4677  case Intrinsic::objectsize: {
4678    // If we don't know by now, we're never going to know.
4679    ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
4680
4681    assert(CI && "Non-constant type in __builtin_object_size?");
4682
4683    SDValue Arg = getValue(I.getOperand(0));
4684    EVT Ty = Arg.getValueType();
4685
4686    if (CI->getZExtValue() == 0)
4687      Res = DAG.getConstant(-1ULL, Ty);
4688    else
4689      Res = DAG.getConstant(0, Ty);
4690
4691    setValue(&I, Res);
4692    if (DisableScheduling)
4693      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4694    return 0;
4695  }
4696  case Intrinsic::var_annotation:
4697    // Discard annotate attributes
4698    return 0;
4699
4700  case Intrinsic::init_trampoline: {
4701    const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4702
4703    SDValue Ops[6];
4704    Ops[0] = getRoot();
4705    Ops[1] = getValue(I.getOperand(1));
4706    Ops[2] = getValue(I.getOperand(2));
4707    Ops[3] = getValue(I.getOperand(3));
4708    Ops[4] = DAG.getSrcValue(I.getOperand(1));
4709    Ops[5] = DAG.getSrcValue(F);
4710
4711    Res = DAG.getNode(ISD::TRAMPOLINE, dl,
4712                      DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4713                      Ops, 6);
4714
4715    setValue(&I, Res);
4716    DAG.setRoot(Res.getValue(1));
4717    if (DisableScheduling)
4718      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4719    return 0;
4720  }
4721  case Intrinsic::gcroot:
4722    if (GFI) {
4723      Value *Alloca = I.getOperand(1);
4724      Constant *TypeMap = cast<Constant>(I.getOperand(2));
4725
4726      FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4727      GFI->addStackRoot(FI->getIndex(), TypeMap);
4728    }
4729    return 0;
4730  case Intrinsic::gcread:
4731  case Intrinsic::gcwrite:
4732    llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
4733    return 0;
4734  case Intrinsic::flt_rounds:
4735    Res = DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32);
4736    setValue(&I, Res);
4737    if (DisableScheduling)
4738      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4739    return 0;
4740  case Intrinsic::trap:
4741    Res = DAG.getNode(ISD::TRAP, dl, MVT::Other, getRoot());
4742    DAG.setRoot(Res);
4743    if (DisableScheduling)
4744      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4745    return 0;
4746  case Intrinsic::uadd_with_overflow:
4747    return implVisitAluOverflow(I, ISD::UADDO);
4748  case Intrinsic::sadd_with_overflow:
4749    return implVisitAluOverflow(I, ISD::SADDO);
4750  case Intrinsic::usub_with_overflow:
4751    return implVisitAluOverflow(I, ISD::USUBO);
4752  case Intrinsic::ssub_with_overflow:
4753    return implVisitAluOverflow(I, ISD::SSUBO);
4754  case Intrinsic::umul_with_overflow:
4755    return implVisitAluOverflow(I, ISD::UMULO);
4756  case Intrinsic::smul_with_overflow:
4757    return implVisitAluOverflow(I, ISD::SMULO);
4758
4759  case Intrinsic::prefetch: {
4760    SDValue Ops[4];
4761    Ops[0] = getRoot();
4762    Ops[1] = getValue(I.getOperand(1));
4763    Ops[2] = getValue(I.getOperand(2));
4764    Ops[3] = getValue(I.getOperand(3));
4765    Res = DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4);
4766    DAG.setRoot(Res);
4767    if (DisableScheduling)
4768      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4769    return 0;
4770  }
4771
4772  case Intrinsic::memory_barrier: {
4773    SDValue Ops[6];
4774    Ops[0] = getRoot();
4775    for (int x = 1; x < 6; ++x)
4776      Ops[x] = getValue(I.getOperand(x));
4777
4778    Res = DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6);
4779    DAG.setRoot(Res);
4780    if (DisableScheduling)
4781      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4782    return 0;
4783  }
4784  case Intrinsic::atomic_cmp_swap: {
4785    SDValue Root = getRoot();
4786    SDValue L =
4787      DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4788                    getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4789                    Root,
4790                    getValue(I.getOperand(1)),
4791                    getValue(I.getOperand(2)),
4792                    getValue(I.getOperand(3)),
4793                    I.getOperand(1));
4794    setValue(&I, L);
4795    DAG.setRoot(L.getValue(1));
4796    if (DisableScheduling)
4797      DAG.AssignOrdering(L.getNode(), SDNodeOrder);
4798    return 0;
4799  }
4800  case Intrinsic::atomic_load_add:
4801    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4802  case Intrinsic::atomic_load_sub:
4803    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4804  case Intrinsic::atomic_load_or:
4805    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4806  case Intrinsic::atomic_load_xor:
4807    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4808  case Intrinsic::atomic_load_and:
4809    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4810  case Intrinsic::atomic_load_nand:
4811    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4812  case Intrinsic::atomic_load_max:
4813    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4814  case Intrinsic::atomic_load_min:
4815    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4816  case Intrinsic::atomic_load_umin:
4817    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4818  case Intrinsic::atomic_load_umax:
4819    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4820  case Intrinsic::atomic_swap:
4821    return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4822
4823  case Intrinsic::invariant_start:
4824  case Intrinsic::lifetime_start:
4825    // Discard region information.
4826    Res = DAG.getUNDEF(TLI.getPointerTy());
4827    setValue(&I, Res);
4828    if (DisableScheduling)
4829      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
4830    return 0;
4831  case Intrinsic::invariant_end:
4832  case Intrinsic::lifetime_end:
4833    // Discard region information.
4834    return 0;
4835  }
4836}
4837
4838/// Test if the given instruction is in a position to be optimized
4839/// with a tail-call. This roughly means that it's in a block with
4840/// a return and there's nothing that needs to be scheduled
4841/// between it and the return.
4842///
4843/// This function only tests target-independent requirements.
4844/// For target-dependent requirements, a target should override
4845/// TargetLowering::IsEligibleForTailCallOptimization.
4846///
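/// For example, in a block ending in
///   %r = tail call i32 @callee(i32 %a)
///   ret i32 %r
/// the call is in tail-call position; an interposing instruction with side
/// effects, or returning something other than the call's result (ignoring
/// no-op casts), disqualifies it.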
4847static bool
4848isInTailCallPosition(const Instruction *I, Attributes CalleeRetAttr,
4849                     const TargetLowering &TLI) {
4850  const BasicBlock *ExitBB = I->getParent();
4851  const TerminatorInst *Term = ExitBB->getTerminator();
4852  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
4853  const Function *F = ExitBB->getParent();
4854
4855  // The block must end in a return statement or an unreachable.
4856  if (!Ret && !isa<UnreachableInst>(Term)) return false;
4857
4858  // If I will have a chain, make sure no other instruction that will have a
4859  // chain interposes between I and the return.
4860  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
4861      !I->isSafeToSpeculativelyExecute())
4862    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
4863         --BBI) {
4864      if (&*BBI == I)
4865        break;
4866      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
4867          !BBI->isSafeToSpeculativelyExecute())
4868        return false;
4869    }
4870
4871  // If the block ends with a void return or unreachable, it doesn't matter
4872  // what the call's return type is.
4873  if (!Ret || Ret->getNumOperands() == 0) return true;
4874
4875  // If the return value is undef, it doesn't matter what the call's
4876  // return type is.
4877  if (isa<UndefValue>(Ret->getOperand(0))) return true;
4878
4879  // Conservatively require the attributes of the call to match those of
4880  // the return. Ignore noalias because it doesn't affect the call sequence.
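  // (The XOR below isolates the attribute bits on which caller and callee
  // differ; masking with ~NoAlias then ignores a mismatch in noalias only.)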
4881  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
4882  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
4883    return false;
4884
4885  // Otherwise, make sure the unmodified return value of I is the return value.
4886  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
4887       U = dyn_cast<Instruction>(U->getOperand(0))) {
4888    if (!U)
4889      return false;
4890    if (!U->hasOneUse())
4891      return false;
4892    if (U == I)
4893      break;
4894    // Check for a truly no-op truncate.
4895    if (isa<TruncInst>(U) &&
4896        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
4897      continue;
4898    // Check for a truly no-op bitcast.
4899    if (isa<BitCastInst>(U) &&
4900        (U->getOperand(0)->getType() == U->getType() ||
4901         (isa<PointerType>(U->getOperand(0)->getType()) &&
4902          isa<PointerType>(U->getType()))))
4903      continue;
4904    // Otherwise it's not a true no-op.
4905    return false;
4906  }
4907
4908  return true;
4909}
4910
4911void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
4912                                      bool isTailCall,
4913                                      MachineBasicBlock *LandingPad) {
4914  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4915  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4916  const Type *RetTy = FTy->getReturnType();
4917  MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4918  unsigned BeginLabel = 0, EndLabel = 0;
4919
4920  TargetLowering::ArgListTy Args;
4921  TargetLowering::ArgListEntry Entry;
4922  Args.reserve(CS.arg_size());
4923
4924  // Check whether the function can return without sret-demotion.
4925  SmallVector<EVT, 4> OutVTs;
4926  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
4927  SmallVector<uint64_t, 4> Offsets;
4928  getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
4929                OutVTs, OutsFlags, TLI, &Offsets);
4930
4931  bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
4932                        FTy->isVarArg(), OutVTs, OutsFlags, DAG);
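  // If the return value cannot be lowered directly, demote it: pass a hidden
  // stack slot as an sret argument and reload the result from it after the
  // call (see the load sequence below).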
4933
4934  SDValue DemoteStackSlot;
4935
4936  if (!CanLowerReturn) {
4937    uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(
4938                      FTy->getReturnType());
4939    unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(
4940                      FTy->getReturnType());
4941    MachineFunction &MF = DAG.getMachineFunction();
4942    int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
4943    const Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
4944
4945    DemoteStackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
4946    Entry.Node = DemoteStackSlot;
4947    Entry.Ty = StackSlotPtrType;
4948    Entry.isSExt = false;
4949    Entry.isZExt = false;
4950    Entry.isInReg = false;
4951    Entry.isSRet = true;
4952    Entry.isNest = false;
4953    Entry.isByVal = false;
4954    Entry.Alignment = Align;
4955    Args.push_back(Entry);
4956    RetTy = Type::getVoidTy(FTy->getContext());
4957  }
4958
4959  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4960       i != e; ++i) {
4961    SDValue ArgNode = getValue(*i);
4962    Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4963
4964    unsigned attrInd = i - CS.arg_begin() + 1;
4965    Entry.isSExt  = CS.paramHasAttr(attrInd, Attribute::SExt);
4966    Entry.isZExt  = CS.paramHasAttr(attrInd, Attribute::ZExt);
4967    Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4968    Entry.isSRet  = CS.paramHasAttr(attrInd, Attribute::StructRet);
4969    Entry.isNest  = CS.paramHasAttr(attrInd, Attribute::Nest);
4970    Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4971    Entry.Alignment = CS.getParamAlignment(attrInd);
4972    Args.push_back(Entry);
4973  }
4974
4975  if (LandingPad && MMI) {
4976    // Insert a label before the invoke call to mark the try range.  This can be
4977    // used to detect deletion of the invoke via the MachineModuleInfo.
4978    BeginLabel = MMI->NextLabelID();
4979
4980    // Both PendingLoads and PendingExports must be flushed here;
4981    // this call might not return.
4982    (void)getRoot();
4983    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4984                             getControlRoot(), BeginLabel));
4985  }
4986
4987  // Check if target-independent constraints permit a tail call here.
4988  // Target-dependent constraints are checked within TLI.LowerCallTo.
4989  if (isTailCall &&
4990      !isInTailCallPosition(CS.getInstruction(),
4991                            CS.getAttributes().getRetAttributes(),
4992                            TLI))
4993    isTailCall = false;
4994
4995  std::pair<SDValue,SDValue> Result =
4996    TLI.LowerCallTo(getRoot(), RetTy,
4997                    CS.paramHasAttr(0, Attribute::SExt),
4998                    CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4999                    CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
5000                    CS.getCallingConv(),
5001                    isTailCall,
5002                    !CS.getInstruction()->use_empty(),
5003                    Callee, Args, DAG, getCurDebugLoc(), SDNodeOrder);
5004  assert((isTailCall || Result.second.getNode()) &&
5005         "Non-null chain expected with non-tail call!");
5006  assert((Result.second.getNode() || !Result.first.getNode()) &&
5007         "Null value expected with tail call!");
5008  if (Result.first.getNode()) {
5009    setValue(CS.getInstruction(), Result.first);
5010    if (DisableScheduling)
5011      DAG.AssignOrdering(Result.first.getNode(), SDNodeOrder);
5012  } else if (!CanLowerReturn && Result.second.getNode()) {
5013    // The instruction result is the result of loading from the
5014    // hidden sret parameter.
5015    SmallVector<EVT, 1> PVTs;
5016    const Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
5017
5018    ComputeValueVTs(TLI, PtrRetTy, PVTs);
5019    assert(PVTs.size() == 1 && "Pointers should fit in one register");
5020    EVT PtrVT = PVTs[0];
5021    unsigned NumValues = OutVTs.size();
5022    SmallVector<SDValue, 4> Values(NumValues);
5023    SmallVector<SDValue, 4> Chains(NumValues);
5024
5025    for (unsigned i = 0; i < NumValues; ++i) {
5026      SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
5027                                DemoteStackSlot,
5028                                DAG.getConstant(Offsets[i], PtrVT));
5029      SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
5030                              Add, NULL, Offsets[i], false, 1);
5031      Values[i] = L;
5032      Chains[i] = L.getValue(1);
5033    }
5034
5035    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
5036                                MVT::Other, &Chains[0], NumValues);
5037    PendingLoads.push_back(Chain);
5038
5039    SDValue MV = DAG.getNode(ISD::MERGE_VALUES,
5040                             getCurDebugLoc(),
5041                             DAG.getVTList(&OutVTs[0], NumValues),
5042                             &Values[0], NumValues);
5043    setValue(CS.getInstruction(), MV);
5044
5045    if (DisableScheduling) {
5046      DAG.AssignOrdering(Chain.getNode(), SDNodeOrder);
5047      DAG.AssignOrdering(MV.getNode(), SDNodeOrder);
5048    }
5049  }
5050
5051  // As a special case, a null chain means that a tail call has been emitted and
5052  // the DAG root is already updated.
5053  if (Result.second.getNode()) {
5054    DAG.setRoot(Result.second);
5055    if (DisableScheduling)
5056      DAG.AssignOrdering(Result.second.getNode(), SDNodeOrder);
5057  } else {
5058    HasTailCall = true;
5059  }
5060
5061  if (LandingPad && MMI) {
5062    // Insert a label at the end of the invoke call to mark the try range.  This
5063    // can be used to detect deletion of the invoke via the MachineModuleInfo.
5064    EndLabel = MMI->NextLabelID();
5065    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
5066                             getRoot(), EndLabel));
5067
5068    // Inform MachineModuleInfo of range.
5069    MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
5070  }
5071}
5072
5073/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
5074/// value is equal or not-equal to zero.
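/// For example, "memcmp(a, b, n) == 0" and "memcmp(a, b, n) != 0" qualify,
/// while an ordered comparison such as "memcmp(a, b, n) < 0" does not.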
5075static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
5076  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
5077       UI != E; ++UI) {
5078    if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
5079      if (IC->isEquality())
5080        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
5081          if (C->isNullValue())
5082            continue;
5083    // Unknown instruction.
5084    return false;
5085  }
5086  return true;
5087}
5088
5089static SDValue getMemCmpLoad(Value *PtrVal, MVT LoadVT, const Type *LoadTy,
5090                             SelectionDAGBuilder &Builder) {
5091
5092  // Check to see if this load can be trivially constant folded, e.g. if the
5093  // input is from a string literal.
5094  if (Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
5095    // Cast pointer to the type we really want to load.
5096    LoadInput = ConstantExpr::getBitCast(LoadInput,
5097                                         PointerType::getUnqual(LoadTy));
5098
5099    if (Constant *LoadCst = ConstantFoldLoadFromConstPtr(LoadInput, Builder.TD))
5100      return Builder.getValue(LoadCst);
5101  }
5102
5103  // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
5104  // still constant memory, the input chain can be the entry node.
5105  SDValue Root;
5106  bool ConstantMemory = false;
5107
5108  // Do not serialize (non-volatile) loads of constant memory with anything.
5109  if (Builder.AA->pointsToConstantMemory(PtrVal)) {
5110    Root = Builder.DAG.getEntryNode();
5111    ConstantMemory = true;
5112  } else {
5113    // Do not serialize non-volatile loads against each other.
5114    Root = Builder.DAG.getRoot();
5115  }
5116
5117  SDValue Ptr = Builder.getValue(PtrVal);
5118  SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurDebugLoc(), Root,
5119                                        Ptr, PtrVal /*SrcValue*/, 0/*SVOffset*/,
5120                                        false /*volatile*/, 1 /* align=1 */);
5121
5122  if (!ConstantMemory)
5123    Builder.PendingLoads.push_back(LoadVal.getValue(1));
5124  return LoadVal;
5125}
5126
5127
5128/// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
5129/// If so, return true and lower it, otherwise return false and it will be
5130/// lowered like a normal call.
5131bool SelectionDAGBuilder::visitMemCmpCall(CallInst &I) {
5132  // Verify that the prototype makes sense.  int memcmp(void*,void*,size_t)
5133  if (I.getNumOperands() != 4)
5134    return false;
5135
5136  Value *LHS = I.getOperand(1), *RHS = I.getOperand(2);
5137  if (!isa<PointerType>(LHS->getType()) || !isa<PointerType>(RHS->getType()) ||
5138      !isa<IntegerType>(I.getOperand(3)->getType()) ||
5139      !isa<IntegerType>(I.getType()))
5140    return false;
5141
5142  ConstantInt *Size = dyn_cast<ConstantInt>(I.getOperand(3));
5143
5144  // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
5145  // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
5146  if (Size && IsOnlyUsedInZeroEqualityComparison(&I)) {
5147    bool ActuallyDoIt = true;
5148    MVT LoadVT;
5149    const Type *LoadTy;
5150    switch (Size->getZExtValue()) {
5151    default:
5152      LoadVT = MVT::Other;
5153      LoadTy = 0;
5154      ActuallyDoIt = false;
5155      break;
5156    case 2:
5157      LoadVT = MVT::i16;
5158      LoadTy = Type::getInt16Ty(Size->getContext());
5159      break;
5160    case 4:
5161      LoadVT = MVT::i32;
5162      LoadTy = Type::getInt32Ty(Size->getContext());
5163      break;
5164    case 8:
5165      LoadVT = MVT::i64;
5166      LoadTy = Type::getInt64Ty(Size->getContext());
5167      break;
5168        /*
5169    case 16:
5170      LoadVT = MVT::v4i32;
5171      LoadTy = Type::getInt32Ty(Size->getContext());
5172      LoadTy = VectorType::get(LoadTy, 4);
5173      break;
5174         */
5175    }
5176
5177    // This turns into unaligned loads.  We only do this if the target natively
5178    // supports the MVT we'll be loading or if it is small enough (<= 4) that
5179    // we'll only produce a small number of byte loads.
5180
5181    // Require that we can find a legal MVT, and only do this if the target
5182    // supports unaligned loads of that type.  Expanding into byte loads would
5183    // bloat the code.
5184    if (ActuallyDoIt && Size->getZExtValue() > 4) {
5185      // TODO: Handle 5 byte compare as 4-byte + 1 byte.
5186      // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
5187      if (!TLI.isTypeLegal(LoadVT) ||!TLI.allowsUnalignedMemoryAccesses(LoadVT))
5188        ActuallyDoIt = false;
5189    }
5190
5191    if (ActuallyDoIt) {
5192      SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this);
5193      SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this);
5194
5195      SDValue Res = DAG.getSetCC(getCurDebugLoc(), MVT::i1, LHSVal, RHSVal,
5196                                 ISD::SETNE);
5197      EVT CallVT = TLI.getValueType(I.getType(), true);
5198      setValue(&I, DAG.getZExtOrTrunc(Res, getCurDebugLoc(), CallVT));
5199      return true;
5200    }
5201  }
5202
5203
5204  return false;
5205}
5206
5207
5208void SelectionDAGBuilder::visitCall(CallInst &I) {
5209  const char *RenameFn = 0;
5210  if (Function *F = I.getCalledFunction()) {
5211    if (F->isDeclaration()) {
5212      const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
5213      if (II) {
5214        if (unsigned IID = II->getIntrinsicID(F)) {
5215          RenameFn = visitIntrinsicCall(I, IID);
5216          if (!RenameFn)
5217            return;
5218        }
5219      }
5220      if (unsigned IID = F->getIntrinsicID()) {
5221        RenameFn = visitIntrinsicCall(I, IID);
5222        if (!RenameFn)
5223          return;
5224      }
5225    }
5226
5227    // Check for well-known libc/libm calls.  If the function is internal, it
5228    // can't be a library call.
5229    if (!F->hasLocalLinkage() && F->hasName()) {
5230      StringRef Name = F->getName();
5231      if (Name == "copysign" || Name == "copysignf") {
5232        if (I.getNumOperands() == 3 &&   // Basic sanity checks.
5233            I.getOperand(1)->getType()->isFloatingPoint() &&
5234            I.getType() == I.getOperand(1)->getType() &&
5235            I.getType() == I.getOperand(2)->getType()) {
5236          SDValue LHS = getValue(I.getOperand(1));
5237          SDValue RHS = getValue(I.getOperand(2));
5238          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
5239                                   LHS.getValueType(), LHS, RHS));
5240          return;
5241        }
5242      } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
5243        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
5244            I.getOperand(1)->getType()->isFloatingPoint() &&
5245            I.getType() == I.getOperand(1)->getType()) {
5246          SDValue Tmp = getValue(I.getOperand(1));
5247          setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
5248                                   Tmp.getValueType(), Tmp));
5249          return;
5250        }
5251      } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
5252        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
5253            I.getOperand(1)->getType()->isFloatingPoint() &&
5254            I.getType() == I.getOperand(1)->getType() &&
5255            I.onlyReadsMemory()) {
5256          SDValue Tmp = getValue(I.getOperand(1));
5257          setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
5258                                   Tmp.getValueType(), Tmp));
5259          return;
5260        }
5261      } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
5262        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
5263            I.getOperand(1)->getType()->isFloatingPoint() &&
5264            I.getType() == I.getOperand(1)->getType() &&
5265            I.onlyReadsMemory()) {
5266          SDValue Tmp = getValue(I.getOperand(1));
5267          setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
5268                                   Tmp.getValueType(), Tmp));
5269          return;
5270        }
5271      } else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
5272        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
5273            I.getOperand(1)->getType()->isFloatingPoint() &&
5274            I.getType() == I.getOperand(1)->getType() &&
5275            I.onlyReadsMemory()) {
5276          SDValue Tmp = getValue(I.getOperand(1));
5277          setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
5278                                   Tmp.getValueType(), Tmp));
5279          return;
5280        }
5281      } else if (Name == "memcmp") {
5282        if (visitMemCmpCall(I))
5283          return;
5284      }
5285    }
5286  } else if (isa<InlineAsm>(I.getOperand(0))) {
5287    visitInlineAsm(&I);
5288    return;
5289  }
5290
5291  SDValue Callee;
5292  if (!RenameFn)
5293    Callee = getValue(I.getOperand(0));
5294  else
5295    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
5296
5297  // Check if we can potentially perform a tail call. More detailed checking
5298  // is done within LowerCallTo, after more information about the call is known.
5299  bool isTailCall = PerformTailCallOpt && I.isTailCall();
5300
5301  LowerCallTo(&I, Callee, isTailCall);
5302}
5303
5304/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
5305/// this value and returns the result as a ValueVT value.  This uses
5306/// Chain/Flag as the input and updates them for the output Chain/Flag.
5307/// If the Flag pointer is NULL, no flag is used.
5308SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
5309                                      unsigned Order, SDValue &Chain,
5310                                      SDValue *Flag) const {
5311  // Assemble the legal parts into the final values.
5312  SmallVector<SDValue, 4> Values(ValueVTs.size());
5313  SmallVector<SDValue, 8> Parts;
5314  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
5315    // Copy the legal parts from the registers.
5316    EVT ValueVT = ValueVTs[Value];
5317    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
5318    EVT RegisterVT = RegVTs[Value];
5319
5320    Parts.resize(NumRegs);
5321    for (unsigned i = 0; i != NumRegs; ++i) {
5322      SDValue P;
5323      if (Flag == 0) {
5324        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
5325      } else {
5326        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
5327        *Flag = P.getValue(2);
5328      }
5329
5330      Chain = P.getValue(1);
5331
5332      if (DisableScheduling)
5333        DAG.AssignOrdering(P.getNode(), Order);
5334
5335      // If the source register was virtual and if we know something about it,
5336      // add an assert node.
5337      if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
5338          RegisterVT.isInteger() && !RegisterVT.isVector()) {
5339        unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
5340        FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
5341        if (FLI.LiveOutRegInfo.size() > SlotNo) {
5342          FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
5343
5344          unsigned RegSize = RegisterVT.getSizeInBits();
5345          unsigned NumSignBits = LOI.NumSignBits;
5346          unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
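          // KnownZero has a bit set for every bit proven zero, so counting its
          // leading ones gives the number of high bits known to be zero.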
5347
5348          // FIXME: We capture more information than the dag can represent.  For
5349          // now, just use the tightest assertzext/assertsext possible.
5350          bool isSExt = true;
5351          EVT FromVT(MVT::Other);
5352          if (NumSignBits == RegSize)
5353            isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
5354          else if (NumZeroBits >= RegSize-1)
5355            isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
5356          else if (NumSignBits > RegSize-8)
5357            isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
5358          else if (NumZeroBits >= RegSize-8)
5359            isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
5360          else if (NumSignBits > RegSize-16)
5361            isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
5362          else if (NumZeroBits >= RegSize-16)
5363            isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
5364          else if (NumSignBits > RegSize-32)
5365            isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
5366          else if (NumZeroBits >= RegSize-32)
5367            isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
5368
5369          if (FromVT != MVT::Other) {
5370            P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
5371                            RegisterVT, P, DAG.getValueType(FromVT));
5372
5373            if (DisableScheduling)
5374              DAG.AssignOrdering(P.getNode(), Order);
5375          }
5376        }
5377      }
5378
5379      Parts[i] = P;
5380    }
5381
5382    Values[Value] = getCopyFromParts(DAG, dl, Order, Parts.begin(),
5383                                     NumRegs, RegisterVT, ValueVT);
5384    if (DisableScheduling)
5385      DAG.AssignOrdering(Values[Value].getNode(), Order);
5386    Part += NumRegs;
5387    Parts.clear();
5388  }
5389
5390  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5391                            DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
5392                            &Values[0], ValueVTs.size());
5393  if (DisableScheduling)
5394    DAG.AssignOrdering(Res.getNode(), Order);
5395  return Res;
5396}
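
// A worked example of the assert-node selection above: with RegisterVT == i32
// (RegSize == 32) and live-out info of NumSignBits == 26, NumZeroBits == 0,
// the first matching case is NumSignBits > RegSize-8, so the copy gets an
// AssertSext to i8: the 32-bit value is the sign-extension of its low 8 bits.
// A value with 24 known-zero high bits (and no stronger sign information)
// would instead get an AssertZext to i8.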
5397
5398/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
5399/// specified value into the registers specified by this object.  This uses
5400/// Chain/Flag as the input and updates them for the output Chain/Flag.
5401/// If the Flag pointer is NULL, no flag is used.
5402void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
5403                                 unsigned Order, SDValue &Chain,
5404                                 SDValue *Flag) const {
5405  // Get the list of the value's legal parts.
5406  unsigned NumRegs = Regs.size();
5407  SmallVector<SDValue, 8> Parts(NumRegs);
5408  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
5409    EVT ValueVT = ValueVTs[Value];
5410    unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
5411    EVT RegisterVT = RegVTs[Value];
5412
5413    getCopyToParts(DAG, dl, Order,
5414                   Val.getValue(Val.getResNo() + Value),
5415                   &Parts[Part], NumParts, RegisterVT);
5416    Part += NumParts;
5417  }
5418
5419  // Copy the parts into the registers.
5420  SmallVector<SDValue, 8> Chains(NumRegs);
5421  for (unsigned i = 0; i != NumRegs; ++i) {
5422    SDValue Part;
5423    if (Flag == 0) {
5424      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
5425    } else {
5426      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
5427      *Flag = Part.getValue(1);
5428    }
5429
5430    Chains[i] = Part.getValue(0);
5431
5432    if (DisableScheduling)
5433      DAG.AssignOrdering(Part.getNode(), Order);
5434  }
5435
5436  if (NumRegs == 1 || Flag)
5437    // If NumRegs > 1 and a Flag is used, then the use of the last CopyToReg is
5438    // flagged to it; that is, the CopyToReg nodes and the user are considered
5439    // a single scheduling unit. If we created a TokenFactor and returned it as
5440    // the chain, the TokenFactor would be both a predecessor (operand) of the
5441    // user as well as a successor (the TF operands are flagged to the user).
5442    // c1, f1 = CopyToReg
5443    // c2, f2 = CopyToReg
5444    // c3     = TokenFactor c1, c2
5445    // ...
5446    //        = op c3, ..., f2
5447    Chain = Chains[NumRegs-1];
5448  else
5449    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
5450
5451  if (DisableScheduling)
5452    DAG.AssignOrdering(Chain.getNode(), Order);
5453}
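
// For illustration: copying an i64 value on a target where only i32 is legal
// gives ValueVT == i64, RegisterVT == i32 and NumParts == 2, so getCopyToParts
// splits the value and two CopyToReg nodes are emitted.  When a Flag pointer
// is passed (as the inline asm lowering below does), the copies are flagged
// together with their user so they form a single scheduling unit.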
5454
5455/// AddInlineAsmOperands - Add this value to the specified inlineasm node
5456/// operand list.  This adds the code marker, which encodes the number of
5457/// values added, followed by the register operands themselves.
5458void RegsForValue::AddInlineAsmOperands(unsigned Code,
5459                                        bool HasMatching, unsigned MatchingIdx,
5460                                        SelectionDAG &DAG, unsigned Order,
5461                                        std::vector<SDValue> &Ops) const {
5462  assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
5463  unsigned Flag = Code | (Regs.size() << 3);
5464  if (HasMatching)
5465    Flag |= 0x80000000 | (MatchingIdx << 16);
5466  SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
5467  Ops.push_back(Res);
5468
5469  if (DisableScheduling)
5470    DAG.AssignOrdering(Res.getNode(), Order);
5471
5472  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
5473    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
5474    EVT RegisterVT = RegVTs[Value];
5475    for (unsigned i = 0; i != NumRegs; ++i) {
5476      assert(Reg < Regs.size() && "Mismatch in # registers expected");
5477      SDValue Res = DAG.getRegister(Regs[Reg++], RegisterVT);
5478      Ops.push_back(Res);
5479
5480      if (DisableScheduling)
5481        DAG.AssignOrdering(Res.getNode(), Order);
5482    }
5483  }
5484}
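
// The flag word built above packs the operand kind with the register count;
// for example a REGDEF (code 2) covering two registers encodes as
// 2 | (2 << 3) == 0x12, and a REGUSE (code 1) of one register tied to output
// operand 0 encodes as 1 | (1 << 3) | 0x80000000 | (0 << 16) == 0x80000009.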
5485
5486/// isAllocatableRegister - If the specified register is safe to allocate,
5487/// i.e. it isn't a stack pointer or some other special register, return the
5488/// register class for the register.  Otherwise, return null.
5489static const TargetRegisterClass *
5490isAllocatableRegister(unsigned Reg, MachineFunction &MF,
5491                      const TargetLowering &TLI,
5492                      const TargetRegisterInfo *TRI) {
5493  EVT FoundVT = MVT::Other;
5494  const TargetRegisterClass *FoundRC = 0;
5495  for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
5496       E = TRI->regclass_end(); RCI != E; ++RCI) {
5497    EVT ThisVT = MVT::Other;
5498
5499    const TargetRegisterClass *RC = *RCI;
5500    // If none of the value types for this register class are valid, we
5501    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
5502    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
5503         I != E; ++I) {
5504      if (TLI.isTypeLegal(*I)) {
5505        // If we have already found this register in a different register class,
5506        // choose the one with the largest VT specified.  For example, on
5507        // PowerPC, we favor f64 register classes over f32.
5508        if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
5509          ThisVT = *I;
5510          break;
5511        }
5512      }
5513    }
5514
5515    if (ThisVT == MVT::Other) continue;
5516
5517    // NOTE: This isn't ideal.  In particular, this might allocate the frame
5518    // pointer in functions that need it (because it hasn't been taken out of
5519    // the allocation order yet, since a variable-sized allocation hasn't been
5520    // seen).  This is a slight code pessimization, but should still work.
5521    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
5522         E = RC->allocation_order_end(MF); I != E; ++I)
5523      if (*I == Reg) {
5524        // We found a matching register class.  Keep looking at others in case
5525        // we find one with larger registers that this physreg is also in.
5526        FoundRC = RC;
5527        FoundVT = ThisVT;
5528        break;
5529      }
5530  }
5531  return FoundRC;
5532}
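
// For example, a 64-bit-only register class is skipped on a 32-bit target
// because none of its value types are legal there, and a reserved register
// such as the stack pointer is normally absent from every allocation order,
// so this returns null for it and it will not be handed out below.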
5533
5534
5535namespace llvm {
5536/// AsmOperandInfo - This contains information for each constraint that we are
5537/// lowering.
5538class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
5539    public TargetLowering::AsmOperandInfo {
5540public:
5541  /// CallOperand - If this is the result output operand or a clobber
5542  /// this is null, otherwise it is the incoming operand to the CallInst.
5543  /// This gets modified as the asm is processed.
5544  SDValue CallOperand;
5545
5546  /// AssignedRegs - If this is a register or register class operand, this
5547    /// contains the set of registers corresponding to the operand.
5548  RegsForValue AssignedRegs;
5549
5550  explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
5551    : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
5552  }
5553
5554  /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
5555  /// busy in OutputRegs/InputRegs.
5556  void MarkAllocatedRegs(bool isOutReg, bool isInReg,
5557                         std::set<unsigned> &OutputRegs,
5558                         std::set<unsigned> &InputRegs,
5559                         const TargetRegisterInfo &TRI) const {
5560    if (isOutReg) {
5561      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
5562        MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
5563    }
5564    if (isInReg) {
5565      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
5566        MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
5567    }
5568  }
5569
5570  /// getCallOperandValEVT - Return the EVT of the Value* that this operand
5571  /// corresponds to.  If there is no Value* for this operand, it returns
5572  /// MVT::Other.
5573  EVT getCallOperandValEVT(LLVMContext &Context,
5574                           const TargetLowering &TLI,
5575                           const TargetData *TD) const {
5576    if (CallOperandVal == 0) return MVT::Other;
5577
5578    if (isa<BasicBlock>(CallOperandVal))
5579      return TLI.getPointerTy();
5580
5581    const llvm::Type *OpTy = CallOperandVal->getType();
5582
5583    // If this is an indirect operand, the operand is a pointer to the
5584    // accessed type.
5585    if (isIndirect) {
5586      const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
5587      if (!PtrTy)
5588        llvm_report_error("Indirect operand for inline asm not a pointer!");
5589      OpTy = PtrTy->getElementType();
5590    }
5591
5592    // If OpTy is not a single value, it may be a struct/union that we
5593    // can tile with integers.
5594    if (!OpTy->isSingleValueType() && OpTy->isSized()) {
5595      unsigned BitSize = TD->getTypeSizeInBits(OpTy);
5596      switch (BitSize) {
5597      default: break;
5598      case 1:
5599      case 8:
5600      case 16:
5601      case 32:
5602      case 64:
5603      case 128:
5604        OpTy = IntegerType::get(Context, BitSize);
5605        break;
5606      }
5607    }
5608
5609    return TLI.getValueType(OpTy, true);
5610  }
5611
5612private:
5613  /// MarkRegAndAliases - Mark the specified register and all aliases in the
5614  /// specified set.
5615  static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
5616                                const TargetRegisterInfo &TRI) {
5617    assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
5618    Regs.insert(Reg);
5619    if (const unsigned *Aliases = TRI.getAliasSet(Reg))
5620      for (; *Aliases; ++Aliases)
5621        Regs.insert(*Aliases);
5622  }
5623};
5624} // end llvm namespace.
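
// Rough example of getCallOperandValEVT above: for an indirect constraint
// such as "=*m" whose operand is an i32*, the EVT returned is i32 (the
// pointee type), and a pointee that is a 64-bit aggregate such as {i32, i32}
// is tiled with a single i64 so it has an integer EVT to work with.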
5625
5626
5627/// GetRegistersForValue - Assign registers (virtual or physical) for the
5628/// specified operand.  We prefer to assign virtual registers, to allow the
5629/// register allocator to handle the assignment process.  However, if the asm
5630/// uses features that we can't model on machineinstrs, we have SDISel do the
5631/// allocation.  This produces generally horrible, but correct, code.
5632///
5633///   OpInfo describes the operand.
5634///   Input and OutputRegs are the set of already allocated physical registers.
5635///
5636void SelectionDAGBuilder::
5637GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
5638                     std::set<unsigned> &OutputRegs,
5639                     std::set<unsigned> &InputRegs) {
5640  LLVMContext &Context = FuncInfo.Fn->getContext();
5641
5642  // Compute whether this value requires an input register, an output register,
5643  // or both.
5644  bool isOutReg = false;
5645  bool isInReg = false;
5646  switch (OpInfo.Type) {
5647  case InlineAsm::isOutput:
5648    isOutReg = true;
5649
5650    // If there is an input constraint that matches this, we need to reserve
5651    // the input register so no other inputs allocate to it.
5652    isInReg = OpInfo.hasMatchingInput();
5653    break;
5654  case InlineAsm::isInput:
5655    isInReg = true;
5656    isOutReg = false;
5657    break;
5658  case InlineAsm::isClobber:
5659    isOutReg = true;
5660    isInReg = true;
5661    break;
5662  }
5663
5664
5665  MachineFunction &MF = DAG.getMachineFunction();
5666  SmallVector<unsigned, 4> Regs;
5667
5668  // If this is a constraint for a single physreg, or a constraint for a
5669  // register class, find it.
5670  std::pair<unsigned, const TargetRegisterClass*> PhysReg =
5671    TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
5672                                     OpInfo.ConstraintVT);
5673
5674  unsigned NumRegs = 1;
5675  if (OpInfo.ConstraintVT != MVT::Other) {
5676    // If this is an FP input in an integer register (or vice versa), insert a bit
5677    // cast of the input value.  More generally, handle any case where the input
5678    // value disagrees with the register class we plan to stick this in.
5679    if (OpInfo.Type == InlineAsm::isInput &&
5680        PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
5681      // Try to convert to the first EVT that the reg class contains.  If the
5682      // types are identical size, use a bitcast to convert (e.g. two differing
5683      // vector types).
5684      EVT RegVT = *PhysReg.second->vt_begin();
5685      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
5686        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5687                                         RegVT, OpInfo.CallOperand);
5688        OpInfo.ConstraintVT = RegVT;
5689      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
5690        // If the input is a FP value and we want it in FP registers, do a
5691        // bitcast to the corresponding integer type.  This turns an f64 value
5692        // into i64, which can be passed with two i32 values on a 32-bit
5693        // machine.
5694        RegVT = EVT::getIntegerVT(Context,
5695                                  OpInfo.ConstraintVT.getSizeInBits());
5696        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5697                                         RegVT, OpInfo.CallOperand);
5698        OpInfo.ConstraintVT = RegVT;
5699      }
5700
5701      if (DisableScheduling)
5702        DAG.AssignOrdering(OpInfo.CallOperand.getNode(), SDNodeOrder);
5703    }
5704
5705    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
5706  }
5707
5708  EVT RegVT;
5709  EVT ValueVT = OpInfo.ConstraintVT;
5710
5711  // If this is a constraint for a specific physical register, like {r17},
5712  // assign it now.
5713  if (unsigned AssignedReg = PhysReg.first) {
5714    const TargetRegisterClass *RC = PhysReg.second;
5715    if (OpInfo.ConstraintVT == MVT::Other)
5716      ValueVT = *RC->vt_begin();
5717
5718    // Get the actual register value type.  This is important, because the user
5719    // may have asked for (e.g.) the AX register in i32 type.  We need to
5720    // remember that AX is actually i16 to get the right extension.
5721    RegVT = *RC->vt_begin();
5722
5723    // This is an explicit reference to a physical register.
5724    Regs.push_back(AssignedReg);
5725
5726    // If this is an expanded reference, add the rest of the regs to Regs.
5727    if (NumRegs != 1) {
5728      TargetRegisterClass::iterator I = RC->begin();
5729      for (; *I != AssignedReg; ++I)
5730        assert(I != RC->end() && "Didn't find reg!");
5731
5732      // Already added the first reg.
5733      --NumRegs; ++I;
5734      for (; NumRegs; --NumRegs, ++I) {
5735        assert(I != RC->end() && "Ran out of registers to allocate!");
5736        Regs.push_back(*I);
5737      }
5738    }
5739
5740    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5741    const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5742    OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5743    return;
5744  }
5745
5746  // Otherwise, if this was a reference to an LLVM register class, create vregs
5747  // for this reference.
5748  if (const TargetRegisterClass *RC = PhysReg.second) {
5749    RegVT = *RC->vt_begin();
5750    if (OpInfo.ConstraintVT == MVT::Other)
5751      ValueVT = RegVT;
5752
5753    // Create the appropriate number of virtual registers.
5754    MachineRegisterInfo &RegInfo = MF.getRegInfo();
5755    for (; NumRegs; --NumRegs)
5756      Regs.push_back(RegInfo.createVirtualRegister(RC));
5757
5758    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5759    return;
5760  }
5761
5762  // This is a reference to a register class that doesn't directly correspond
5763  // to an LLVM register class.  Allocate NumRegs consecutive, available,
5764  // registers from the class.
5765  std::vector<unsigned> RegClassRegs
5766    = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
5767                                            OpInfo.ConstraintVT);
5768
5769  const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5770  unsigned NumAllocated = 0;
5771  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
5772    unsigned Reg = RegClassRegs[i];
5773    // See if this register is available.
5774    if ((isOutReg && OutputRegs.count(Reg)) ||   // Already used.
5775        (isInReg  && InputRegs.count(Reg))) {    // Already used.
5776      // Make sure we find consecutive registers.
5777      NumAllocated = 0;
5778      continue;
5779    }
5780
5781    // Check to see if this register is allocatable (i.e. don't give out the
5782    // stack pointer).
5783    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
5784    if (!RC) {        // Couldn't allocate this register.
5785      // Reset NumAllocated to make sure we return consecutive registers.
5786      NumAllocated = 0;
5787      continue;
5788    }
5789
5790    // Okay, this register is good, we can use it.
5791    ++NumAllocated;
5792
5793    // If we allocated enough consecutive registers, succeed.
5794    if (NumAllocated == NumRegs) {
5795      unsigned RegStart = (i-NumAllocated)+1;
5796      unsigned RegEnd   = i+1;
5797      // Mark all of the allocated registers used.
5798      for (unsigned i = RegStart; i != RegEnd; ++i)
5799        Regs.push_back(RegClassRegs[i]);
5800
5801      OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
5802                                         OpInfo.ConstraintVT);
5803      OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5804      return;
5805    }
5806  }
5807
5808  // Otherwise, we couldn't allocate enough registers for this.
5809}
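
// Sketch of the three cases above: a constraint naming one physreg (e.g.
// "{r17}") takes that register, plus the following registers of its class if
// the value needs more than one; a register-class constraint (e.g. "r") gets
// freshly created virtual registers; and only as a last resort do we scan
// getRegClassForInlineAsmConstraint for NumRegs consecutive free physregs.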
5810
5811/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
5812/// processed uses a memory 'm' constraint.
5813static bool
5814hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
5815                          const TargetLowering &TLI) {
5816  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
5817    InlineAsm::ConstraintInfo &CI = CInfos[i];
5818    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
5819      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
5820      if (CType == TargetLowering::C_Memory)
5821        return true;
5822    }
5823
5824    // Indirect operand accesses access memory.
5825    if (CI.isIndirect)
5826      return true;
5827  }
5828
5829  return false;
5830}
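
// For example, an asm with constraint string "=r,m" returns true here because
// of the memory input, as does one with only an indirect "=*r" output, while
// a plain "=r,r" asm returns false.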
5831
5832/// visitInlineAsm - Handle a call to an InlineAsm object.
5833///
5834void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
5835  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
5836
5837  /// ConstraintOperands - Information about all of the constraints.
5838  std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5839
5840  std::set<unsigned> OutputRegs, InputRegs;
5841
5842  // Do a prepass over the constraints, canonicalizing them, and building up the
5843  // ConstraintOperands list.
5844  std::vector<InlineAsm::ConstraintInfo>
5845    ConstraintInfos = IA->ParseConstraints();
5846
5847  bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5848
5849  SDValue Chain, Flag;
5850
5851  // We won't need to flush pending loads if this asm doesn't touch
5852  // memory and is nonvolatile.
5853  if (hasMemory || IA->hasSideEffects())
5854    Chain = getRoot();
5855  else
5856    Chain = DAG.getRoot();
5857
5858  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
5859  unsigned ResNo = 0;   // ResNo - The result number of the next output.
5860  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5861    ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5862    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5863
5864    EVT OpVT = MVT::Other;
5865
5866    // Compute the value type for each operand.
5867    switch (OpInfo.Type) {
5868    case InlineAsm::isOutput:
5869      // Indirect outputs just consume an argument.
5870      if (OpInfo.isIndirect) {
5871        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5872        break;
5873      }
5874
5875      // The return value of the call is this value.  As such, there is no
5876      // corresponding argument.
5877      assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5878             "Bad inline asm!");
5879      if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5880        OpVT = TLI.getValueType(STy->getElementType(ResNo));
5881      } else {
5882        assert(ResNo == 0 && "Asm only has one result!");
5883        OpVT = TLI.getValueType(CS.getType());
5884      }
5885      ++ResNo;
5886      break;
5887    case InlineAsm::isInput:
5888      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5889      break;
5890    case InlineAsm::isClobber:
5891      // Nothing to do.
5892      break;
5893    }
5894
5895    // If this is an input or an indirect output, process the call argument.
5896    // BasicBlocks are labels, currently appearing only in asm's.
5897    if (OpInfo.CallOperandVal) {
5898      // Strip bitcasts, if any.  This mostly comes up for functions.
5899      OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
5900
5901      if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5902        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5903      } else {
5904        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5905      }
5906
5907      OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
5908    }
5909
5910    OpInfo.ConstraintVT = OpVT;
5911  }
5912
5913  // Second pass over the constraints: compute which constraint option to use
5914  // and assign registers to constraints that want a specific physreg.
5915  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5916    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5917
5918    // If this is an output operand with a matching input operand, look up the
5919    // matching input. If their types mismatch, e.g. one is an integer, the
5920    // other is floating point, or their sizes are different, flag it as an
5921    // error.
5922    if (OpInfo.hasMatchingInput()) {
5923      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5924      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5925        if ((OpInfo.ConstraintVT.isInteger() !=
5926             Input.ConstraintVT.isInteger()) ||
5927            (OpInfo.ConstraintVT.getSizeInBits() !=
5928             Input.ConstraintVT.getSizeInBits())) {
5929          llvm_report_error("Unsupported asm: input constraint"
5930                            " with a matching output constraint of incompatible"
5931                            " type!");
5932        }
5933        Input.ConstraintVT = OpInfo.ConstraintVT;
5934      }
5935    }
5936
5937    // Compute the constraint code and ConstraintType to use.
5938    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5939
5940    // If this is a memory input, and if the operand is not indirect, do what we
5941    // need to provide an address for the memory input.
5942    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5943        !OpInfo.isIndirect) {
5944      assert(OpInfo.Type == InlineAsm::isInput &&
5945             "Can only indirectify direct input operands!");
5946
5947      // Memory operands really want the address of the value.  If we don't have
5948      // an indirect input, put it in the constant pool if we can, otherwise spill
5949      // it to a stack slot.
5950
5951      // If the operand is a float, integer, or vector constant, spill to a
5952      // constant pool entry to get its address.
5953      Value *OpVal = OpInfo.CallOperandVal;
5954      if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5955          isa<ConstantVector>(OpVal)) {
5956        OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5957                                                 TLI.getPointerTy());
5958      } else {
5959        // Otherwise, create a stack slot and emit a store to it before the
5960        // asm.
5961        const Type *Ty = OpVal->getType();
5962        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5963        unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5964        MachineFunction &MF = DAG.getMachineFunction();
5965        int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
5966        SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5967        Chain = DAG.getStore(Chain, getCurDebugLoc(),
5968                             OpInfo.CallOperand, StackSlot, NULL, 0);
5969        OpInfo.CallOperand = StackSlot;
5970      }
5971
5972      // There is no longer a Value* corresponding to this operand.
5973      OpInfo.CallOperandVal = 0;
5974
5975      // It is now an indirect operand.
5976      OpInfo.isIndirect = true;
5977    }
5978
5979    // If this constraint is for a specific register, allocate it before
5980    // anything else.
5981    if (OpInfo.ConstraintType == TargetLowering::C_Register)
5982      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5983  }
5984
5985  ConstraintInfos.clear();
5986
5987  // Third pass - Loop over all of the operands, assigning virtual or physregs
5988  // to register class operands.
5989  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5990    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5991
5992    // C_Register operands have already been allocated, Other/Memory don't need
5993    // to be.
5994    if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5995      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5996  }
5997
5998  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5999  std::vector<SDValue> AsmNodeOperands;
6000  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
6001  AsmNodeOperands.push_back(
6002          DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
6003
6004
6005  // Loop over all of the inputs, copying the operand values into the
6006  // appropriate registers and processing the output regs.
6007  RegsForValue RetValRegs;
6008
6009  // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
6010  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
6011
6012  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6013    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6014
6015    switch (OpInfo.Type) {
6016    case InlineAsm::isOutput: {
6017      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
6018          OpInfo.ConstraintType != TargetLowering::C_Register) {
6019        // Memory output, or 'other' output (e.g. 'X' constraint).
6020        assert(OpInfo.isIndirect && "Memory output must be indirect operand");
6021
6022        // Add information to the INLINEASM node to know about this output.
6023        unsigned ResOpType = 4/*MEM*/ | (1<<3);
6024        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6025                                                        TLI.getPointerTy()));
6026        AsmNodeOperands.push_back(OpInfo.CallOperand);
6027        break;
6028      }
6029
6030      // Otherwise, this is a register or register class output.
6031
6032      // Copy the output from the appropriate register.  Find a register that
6033      // we can use.
6034      if (OpInfo.AssignedRegs.Regs.empty()) {
6035        llvm_report_error("Couldn't allocate output reg for"
6036                          " constraint '" + OpInfo.ConstraintCode + "'!");
6037      }
6038
6039      // If this is an indirect operand, store through the pointer after the
6040      // asm.
6041      if (OpInfo.isIndirect) {
6042        IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
6043                                                      OpInfo.CallOperandVal));
6044      } else {
6045        // This is the result value of the call.
6046        assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
6047               "Bad inline asm!");
6048        // Concatenate this output onto the outputs list.
6049        RetValRegs.append(OpInfo.AssignedRegs);
6050      }
6051
6052      // Add information to the INLINEASM node to know that this register is
6053      // set.
6054      OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
6055                                               6 /* EARLYCLOBBER REGDEF */ :
6056                                               2 /* REGDEF */ ,
6057                                               false,
6058                                               0,
6059                                               DAG, SDNodeOrder,
6060                                               AsmNodeOperands);
6061      break;
6062    }
6063    case InlineAsm::isInput: {
6064      SDValue InOperandVal = OpInfo.CallOperand;
6065
6066      if (OpInfo.isMatchingInputConstraint()) {   // Matching constraint?
6067        // If this is required to match an output register we have already set,
6068        // just use its register.
6069        unsigned OperandNo = OpInfo.getMatchedOperand();
6070
6071        // Scan until we find the definition of this operand we already emitted.
6072        // When we find it, create a RegsForValue operand.
6073        unsigned CurOp = 2;  // The first operand.
6074        for (; OperandNo; --OperandNo) {
6075          // Advance to the next operand.
6076          unsigned OpFlag =
6077            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6078          assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
6079                  (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
6080                  (OpFlag & 7) == 4 /*MEM*/) &&
6081                 "Skipped past definitions?");
6082          CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
6083        }
6084
6085        unsigned OpFlag =
6086          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6087        if ((OpFlag & 7) == 2 /*REGDEF*/
6088            || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
6089          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
6090          if (OpInfo.isIndirect) {
6091            llvm_report_error("Don't know how to handle tied indirect "
6092                              "register inputs yet!");
6093          }
6094          RegsForValue MatchedRegs;
6095          MatchedRegs.TLI = &TLI;
6096          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
6097          EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
6098          MatchedRegs.RegVTs.push_back(RegVT);
6099          MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
6100          for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
6101               i != e; ++i)
6102            MatchedRegs.Regs.
6103              push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
6104
6105          // Use the produced MatchedRegs object to copy the input value into them.
6106          MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
6107                                    SDNodeOrder, Chain, &Flag);
6108          MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
6109                                           true, OpInfo.getMatchedOperand(),
6110                                           DAG, SDNodeOrder, AsmNodeOperands);
6111          break;
6112        } else {
6113          assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
6114          assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
6115                 "Unexpected number of operands");
6116          // Add information to the INLINEASM node to know about this input.
6117          // See InlineAsm.h isUseOperandTiedToDef.
6118          OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
6119          AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
6120                                                          TLI.getPointerTy()));
6121          AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
6122          break;
6123        }
6124      }
6125
6126      if (OpInfo.ConstraintType == TargetLowering::C_Other) {
6127        assert(!OpInfo.isIndirect &&
6128               "Don't know how to handle indirect other inputs yet!");
6129
6130        std::vector<SDValue> Ops;
6131        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
6132                                         hasMemory, Ops, DAG);
6133        if (Ops.empty()) {
6134          llvm_report_error("Invalid operand for inline asm"
6135                            " constraint '" + OpInfo.ConstraintCode + "'!");
6136        }
6137
6138        // Add information to the INLINEASM node to know about this input.
6139        unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
6140        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6141                                                        TLI.getPointerTy()));
6142        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
6143        break;
6144      } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
6145        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
6146        assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
6147               "Memory operands expect pointer values");
6148
6149        // Add information to the INLINEASM node to know about this input.
6150        unsigned ResOpType = 4/*MEM*/ | (1<<3);
6151        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6152                                                        TLI.getPointerTy()));
6153        AsmNodeOperands.push_back(InOperandVal);
6154        break;
6155      }
6156
6157      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
6158              OpInfo.ConstraintType == TargetLowering::C_Register) &&
6159             "Unknown constraint type!");
6160      assert(!OpInfo.isIndirect &&
6161             "Don't know how to handle indirect register inputs yet!");
6162
6163      // Copy the input into the appropriate registers.
6164      if (OpInfo.AssignedRegs.Regs.empty()) {
6165        llvm_report_error("Couldn't allocate input reg for"
6166                          " constraint '"+ OpInfo.ConstraintCode +"'!");
6167      }
6168
6169      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
6170                                        SDNodeOrder, Chain, &Flag);
6171
6172      OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
6173                                               DAG, SDNodeOrder,
6174                                               AsmNodeOperands);
6175      break;
6176    }
6177    case InlineAsm::isClobber: {
6178      // Add the clobbered value to the operand list, so that the register
6179      // allocator is aware that the physreg got clobbered.
6180      if (!OpInfo.AssignedRegs.Regs.empty())
6181        OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
6182                                                 false, 0, DAG, SDNodeOrder,
6183                                                 AsmNodeOperands);
6184      break;
6185    }
6186    }
6187  }
6188
6189  // Finish up input operands.
6190  AsmNodeOperands[0] = Chain;
6191  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
6192
6193  Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
6194                      DAG.getVTList(MVT::Other, MVT::Flag),
6195                      &AsmNodeOperands[0], AsmNodeOperands.size());
6196  Flag = Chain.getValue(1);
6197
6198  // If this asm returns a register value, copy the result from that register
6199  // and set it as the value of the call.
6200  if (!RetValRegs.Regs.empty()) {
6201    SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
6202                                             SDNodeOrder, Chain, &Flag);
6203
6204    // FIXME: Why don't we do this for inline asms with MRVs?
6205    if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
6206      EVT ResultType = TLI.getValueType(CS.getType());
6207
6208      // If any of the results of the inline asm is a vector, it may have the
6209      // wrong width/num elts.  This can happen for register classes that can
6210      // contain multiple different value types.  The preg or vreg allocated may
6211      // not have the same VT as was expected.  Convert it to the right type
6212      // with bit_convert.
6213      if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
6214        Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
6215                          ResultType, Val);
6216
6217      } else if (ResultType != Val.getValueType() &&
6218                 ResultType.isInteger() && Val.getValueType().isInteger()) {
6219        // If a result value was tied to an input value, the computed result may
6220        // have a wider width than the expected result.  Extract the relevant
6221        // portion.
6222        Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
6223      }
6224
6225      assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
6226    }
6227
6228    setValue(CS.getInstruction(), Val);
6229    // Don't need to use this as a chain in this case.
6230    if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
6231      return;
6232  }
6233
6234  std::vector<std::pair<SDValue, Value*> > StoresToEmit;
6235
6236  // Process indirect outputs, first output all of the flagged copies out of
6237  // physregs.
6238  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
6239    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
6240    Value *Ptr = IndirectStoresToEmit[i].second;
6241    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
6242                                             SDNodeOrder, Chain, &Flag);
6243    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
6244
6245  }
6246
6247  // Emit the non-flagged stores from the physregs.
6248  SmallVector<SDValue, 8> OutChains;
6249  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
6250    SDValue Val = DAG.getStore(Chain, getCurDebugLoc(),
6251                               StoresToEmit[i].first,
6252                               getValue(StoresToEmit[i].second),
6253                               StoresToEmit[i].second, 0);
6254    OutChains.push_back(Val);
6255  }
6256
6257  if (!OutChains.empty())
6258    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
6259                        &OutChains[0], OutChains.size());
6260
6261  DAG.setRoot(Chain);
6262}
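
// The resulting INLINEASM node has operand 0 as the chain, operand 1 as the
// asm string (a target external symbol), then one target-constant flag word
// per operand followed by that operand's registers or memory address, and
// finally the input flag when one is live; it produces a chain and a flag.
// For example "=r"(out) / "r"(in) yields a REGDEF word plus the output
// register and a REGUSE word plus the input register, and the output is
// copied back out of its register right after the node.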
6263
6264void SelectionDAGBuilder::visitVAStart(CallInst &I) {
6265  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
6266                          MVT::Other, getRoot(),
6267                          getValue(I.getOperand(1)),
6268                          DAG.getSrcValue(I.getOperand(1))));
6269}
6270
6271void SelectionDAGBuilder::visitVAArg(VAArgInst &I) {
6272  SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
6273                           getRoot(), getValue(I.getOperand(0)),
6274                           DAG.getSrcValue(I.getOperand(0)));
6275  setValue(&I, V);
6276  DAG.setRoot(V.getValue(1));
6277}
6278
6279void SelectionDAGBuilder::visitVAEnd(CallInst &I) {
6280  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
6281                          MVT::Other, getRoot(),
6282                          getValue(I.getOperand(1)),
6283                          DAG.getSrcValue(I.getOperand(1))));
6284}
6285
6286void SelectionDAGBuilder::visitVACopy(CallInst &I) {
6287  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
6288                          MVT::Other, getRoot(),
6289                          getValue(I.getOperand(1)),
6290                          getValue(I.getOperand(2)),
6291                          DAG.getSrcValue(I.getOperand(1)),
6292                          DAG.getSrcValue(I.getOperand(2))));
6293}
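
// These visitors map the va_* operations straight onto their ISD nodes, e.g.
// a call to llvm.va_start(i8* %ap) becomes an ISD::VASTART chained off the
// current root with the %ap value and its SrcValue as operands, and a va_arg
// instruction becomes an ISD::VAARG whose second result is the new chain.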
6294
6295/// TargetLowering::LowerCallTo - This is the default LowerCallTo
6296/// implementation, which just calls LowerCall.
6297/// FIXME: When all targets are
6298/// migrated to using LowerCall, this hook should be integrated into SDISel.
6299std::pair<SDValue, SDValue>
6300TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
6301                            bool RetSExt, bool RetZExt, bool isVarArg,
6302                            bool isInreg, unsigned NumFixedArgs,
6303                            CallingConv::ID CallConv, bool isTailCall,
6304                            bool isReturnValueUsed,
6305                            SDValue Callee,
6306                            ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl,
6307                            unsigned Order) {
6308  assert((!isTailCall || PerformTailCallOpt) &&
6309         "isTailCall set when tail-call optimizations are disabled!");
6310
6311  // Handle all of the outgoing arguments.
6312  SmallVector<ISD::OutputArg, 32> Outs;
6313  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
6314    SmallVector<EVT, 4> ValueVTs;
6315    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
6316    for (unsigned Value = 0, NumValues = ValueVTs.size();
6317         Value != NumValues; ++Value) {
6318      EVT VT = ValueVTs[Value];
6319      const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
6320      SDValue Op = SDValue(Args[i].Node.getNode(),
6321                           Args[i].Node.getResNo() + Value);
6322      ISD::ArgFlagsTy Flags;
6323      unsigned OriginalAlignment =
6324        getTargetData()->getABITypeAlignment(ArgTy);
6325
6326      if (Args[i].isZExt)
6327        Flags.setZExt();
6328      if (Args[i].isSExt)
6329        Flags.setSExt();
6330      if (Args[i].isInReg)
6331        Flags.setInReg();
6332      if (Args[i].isSRet)
6333        Flags.setSRet();
6334      if (Args[i].isByVal) {
6335        Flags.setByVal();
6336        const PointerType *Ty = cast<PointerType>(Args[i].Ty);
6337        const Type *ElementTy = Ty->getElementType();
6338        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
6339        unsigned FrameSize  = getTargetData()->getTypeAllocSize(ElementTy);
6340        // For ByVal, alignment should come from FE.  BE will guess if this
6341        // info is not there but there are cases it cannot get right.
6342        // info is not there, but there are cases it cannot get right.
6343          FrameAlign = Args[i].Alignment;
6344        Flags.setByValAlign(FrameAlign);
6345        Flags.setByValSize(FrameSize);
6346      }
6347      if (Args[i].isNest)
6348        Flags.setNest();
6349      Flags.setOrigAlign(OriginalAlignment);
6350
6351      EVT PartVT = getRegisterType(RetTy->getContext(), VT);
6352      unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
6353      SmallVector<SDValue, 4> Parts(NumParts);
6354      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
6355
6356      if (Args[i].isSExt)
6357        ExtendKind = ISD::SIGN_EXTEND;
6358      else if (Args[i].isZExt)
6359        ExtendKind = ISD::ZERO_EXTEND;
6360
6361      getCopyToParts(DAG, dl, Order, Op, &Parts[0], NumParts,
6362                     PartVT, ExtendKind);
6363
6364      for (unsigned j = 0; j != NumParts; ++j) {
6365        // If it isn't the first piece, the alignment must be 1.
6366        ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
6367        if (NumParts > 1 && j == 0)
6368          MyFlags.Flags.setSplit();
6369        else if (j != 0)
6370          MyFlags.Flags.setOrigAlign(1);
6371
6372        Outs.push_back(MyFlags);
6373      }
6374    }
6375  }
6376
6377  // Handle the incoming return values from the call.
6378  SmallVector<ISD::InputArg, 32> Ins;
6379  SmallVector<EVT, 4> RetTys;
6380  ComputeValueVTs(*this, RetTy, RetTys);
6381  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
6382    EVT VT = RetTys[I];
6383    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
6384    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
6385    for (unsigned i = 0; i != NumRegs; ++i) {
6386      ISD::InputArg MyFlags;
6387      MyFlags.VT = RegisterVT;
6388      MyFlags.Used = isReturnValueUsed;
6389      if (RetSExt)
6390        MyFlags.Flags.setSExt();
6391      if (RetZExt)
6392        MyFlags.Flags.setZExt();
6393      if (isInreg)
6394        MyFlags.Flags.setInReg();
6395      Ins.push_back(MyFlags);
6396    }
6397  }
6398
6399  // Check if target-dependent constraints permit a tail call here.
6400  // Target-independent constraints should be checked by the caller.
6401  if (isTailCall &&
6402      !IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG))
6403    isTailCall = false;
6404
6405  SmallVector<SDValue, 4> InVals;
6406  Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
6407                    Outs, Ins, dl, DAG, InVals);
6408
6409  // Verify that the target's LowerCall behaved as expected.
6410  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
6411         "LowerCall didn't return a valid chain!");
6412  assert((!isTailCall || InVals.empty()) &&
6413         "LowerCall emitted a return value for a tail call!");
6414  assert((isTailCall || InVals.size() == Ins.size()) &&
6415         "LowerCall didn't emit the correct number of values!");
6416  DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
6417          assert(InVals[i].getNode() &&
6418                 "LowerCall emitted a null value!");
6419          assert(Ins[i].VT == InVals[i].getValueType() &&
6420                 "LowerCall emitted a value with the wrong type!");
6421        });
6422
6423  if (DisableScheduling)
6424    DAG.AssignOrdering(Chain.getNode(), Order);
6425
6426  // For a tail call, the return value is merely live-out and there aren't
6427  // any nodes in the DAG representing it. Return a special value to
6428  // indicate that a tail call has been emitted and no more Instructions
6429  // should be processed in the current block.
6430  if (isTailCall) {
6431    DAG.setRoot(Chain);
6432    return std::make_pair(SDValue(), SDValue());
6433  }
6434
6435  // Collect the legal value parts into potentially illegal values
6436  // that correspond to the original function's return values.
6437  ISD::NodeType AssertOp = ISD::DELETED_NODE;
6438  if (RetSExt)
6439    AssertOp = ISD::AssertSext;
6440  else if (RetZExt)
6441    AssertOp = ISD::AssertZext;
6442  SmallVector<SDValue, 4> ReturnValues;
6443  unsigned CurReg = 0;
6444  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
6445    EVT VT = RetTys[I];
6446    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
6447    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
6448
6449    SDValue ReturnValue =
6450      getCopyFromParts(DAG, dl, Order, &InVals[CurReg], NumRegs,
6451                       RegisterVT, VT, AssertOp);
6452    ReturnValues.push_back(ReturnValue);
6453    if (DisableScheduling)
6454      DAG.AssignOrdering(ReturnValue.getNode(), Order);
6455    CurReg += NumRegs;
6456  }
6457
6458  // For a function returning void, there is no return value. We can't create
6459  // such a node, so we just return a null return value in that case; nothing
6460  // will actually look at the value anyway.
6461  if (ReturnValues.empty())
6462    return std::make_pair(SDValue(), Chain);
6463
6464  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
6465                            DAG.getVTList(&RetTys[0], RetTys.size()),
6466                            &ReturnValues[0], ReturnValues.size());
6467  if (DisableScheduling)
6468    DAG.AssignOrdering(Res.getNode(), Order);
6469  return std::make_pair(Res, Chain);
6470}
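
// Worked example of the return-value handling above: for a call returning i64
// on a target whose widest legal integer type is i32, RetTys == {i64} and
// getNumRegisters returns 2, so Ins describes two i32 pieces; LowerCall hands
// back two i32 InVals, getCopyFromParts reassembles them into one i64, and
// that value is wrapped in MERGE_VALUES and returned together with the chain.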
6471
6472void TargetLowering::LowerOperationWrapper(SDNode *N,
6473                                           SmallVectorImpl<SDValue> &Results,
6474                                           SelectionDAG &DAG) {
6475  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
6476  if (Res.getNode())
6477    Results.push_back(Res);
6478}
6479
6480SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
6481  llvm_unreachable("LowerOperation not implemented for this target!");
6482  return SDValue();
6483}
6484
6485void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
6486  SDValue Op = getValue(V);
6487  assert((Op.getOpcode() != ISD::CopyFromReg ||
6488          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
6489         "Copy from a reg to the same reg!");
6490  assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
6491
6492  RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
6493  SDValue Chain = DAG.getEntryNode();
6494  RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), SDNodeOrder, Chain, 0);
6495  PendingExports.push_back(Chain);
6496}
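
// This is how a value that is used outside its defining block gets exported:
// the CopyToReg chain is built off the entry node rather than the current
// root and is queued on PendingExports, to be merged into the root before the
// block is finished.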
6497
6498#include "llvm/CodeGen/SelectionDAGISel.h"
6499
6500void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
6501  // If this is the entry block, emit arguments.
6502  Function &F = *LLVMBB->getParent();
6503  SelectionDAG &DAG = SDB->DAG;
6504  SDValue OldRoot = DAG.getRoot();
6505  DebugLoc dl = SDB->getCurDebugLoc();
6506  const TargetData *TD = TLI.getTargetData();
6507  SmallVector<ISD::InputArg, 16> Ins;
6508
6509  // Check whether the function can return without sret-demotion.
6510  SmallVector<EVT, 4> OutVTs;
6511  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
6512  getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
6513                OutVTs, OutsFlags, TLI);
6514  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
6515
6516  FLI.CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
6517                                          OutVTs, OutsFlags, DAG);
6518  if (!FLI.CanLowerReturn) {
6519    // Put in an sret pointer parameter before all the other parameters.
6520    SmallVector<EVT, 1> ValueVTs;
6521    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
6522
6523    // NOTE: Assuming that a pointer will never break down to more than one VT
6524    // or more than one register.
6525    ISD::ArgFlagsTy Flags;
6526    Flags.setSRet();
6527    EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), ValueVTs[0]);
6528    ISD::InputArg RetArg(Flags, RegisterVT, true);
6529    Ins.push_back(RetArg);
6530  }
6531
6532  // Set up the incoming argument description vector.
6533  unsigned Idx = 1;
6534  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
6535       I != E; ++I, ++Idx) {
6536    SmallVector<EVT, 4> ValueVTs;
6537    ComputeValueVTs(TLI, I->getType(), ValueVTs);
6538    bool isArgValueUsed = !I->use_empty();
6539    for (unsigned Value = 0, NumValues = ValueVTs.size();
6540         Value != NumValues; ++Value) {
6541      EVT VT = ValueVTs[Value];
6542      const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
6543      ISD::ArgFlagsTy Flags;
6544      unsigned OriginalAlignment =
6545        TD->getABITypeAlignment(ArgTy);
6546
6547      if (F.paramHasAttr(Idx, Attribute::ZExt))
6548        Flags.setZExt();
6549      if (F.paramHasAttr(Idx, Attribute::SExt))
6550        Flags.setSExt();
6551      if (F.paramHasAttr(Idx, Attribute::InReg))
6552        Flags.setInReg();
6553      if (F.paramHasAttr(Idx, Attribute::StructRet))
6554        Flags.setSRet();
6555      if (F.paramHasAttr(Idx, Attribute::ByVal)) {
6556        Flags.setByVal();
6557        const PointerType *Ty = cast<PointerType>(I->getType());
6558        const Type *ElementTy = Ty->getElementType();
6559        unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
6560        unsigned FrameSize  = TD->getTypeAllocSize(ElementTy);
6561        // For ByVal, alignment should be passed from FE.  BE will guess if
6562        // this info is not there, but there are cases it cannot get right.
6563        if (F.getParamAlignment(Idx))
6564          FrameAlign = F.getParamAlignment(Idx);
6565        Flags.setByValAlign(FrameAlign);
6566        Flags.setByValSize(FrameSize);
6567      }
6568      if (F.paramHasAttr(Idx, Attribute::Nest))
6569        Flags.setNest();
6570      Flags.setOrigAlign(OriginalAlignment);
6571
6572      EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
6573      unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
6574      for (unsigned i = 0; i != NumRegs; ++i) {
6575        ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
6576        if (NumRegs > 1 && i == 0)
6577          MyFlags.Flags.setSplit();
6578        // If it isn't the first piece, the alignment must be 1.
6579        else if (i > 0)
6580          MyFlags.Flags.setOrigAlign(1);
6581        Ins.push_back(MyFlags);
6582      }
6583    }
6584  }
6585
6586  // Call the target to set up the argument values.
6587  SmallVector<SDValue, 8> InVals;
6588  SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
6589                                             F.isVarArg(), Ins,
6590                                             dl, DAG, InVals);
6591
6592  // Verify that the target's LowerFormalArguments behaved as expected.
6593  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
6594         "LowerFormalArguments didn't return a valid chain!");
6595  assert(InVals.size() == Ins.size() &&
6596         "LowerFormalArguments didn't emit the correct number of values!");
6597  DEBUG({
6598      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
6599        assert(InVals[i].getNode() &&
6600               "LowerFormalArguments emitted a null value!");
6601        assert(Ins[i].VT == InVals[i].getValueType() &&
6602               "LowerFormalArguments emitted a value with the wrong type!");
6603      }
6604    });
6605
6606  // Update the DAG with the new chain value resulting from argument lowering.
6607  DAG.setRoot(NewRoot);
6608
6609  // Set up the argument values.
6610  unsigned i = 0;
6611  Idx = 1;
6612  if (!FLI.CanLowerReturn) {
6613    // Create a virtual register for the sret pointer, and put in a copy
6614    // from the sret argument into it.
6615    SmallVector<EVT, 1> ValueVTs;
6616    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
6617    EVT VT = ValueVTs[0];
6618    EVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
6619    ISD::NodeType AssertOp = ISD::DELETED_NODE;
6620    SDValue ArgValue = getCopyFromParts(DAG, dl, 0, &InVals[0], 1,
6621                                        RegVT, VT, AssertOp);
6622
6623    MachineFunction& MF = SDB->DAG.getMachineFunction();
6624    MachineRegisterInfo& RegInfo = MF.getRegInfo();
6625    unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT));
6626    FLI.DemoteRegister = SRetReg;
6627    NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(), SRetReg, ArgValue);
6628    DAG.setRoot(NewRoot);
6629
6630    // i indexes lowered arguments.  Bump it past the hidden sret argument.
6631    // Idx indexes LLVM arguments.  Don't touch it.
6632    ++i;
6633  }
6634
6635  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
6636      ++I, ++Idx) {
6637    SmallVector<SDValue, 4> ArgValues;
6638    SmallVector<EVT, 4> ValueVTs;
6639    ComputeValueVTs(TLI, I->getType(), ValueVTs);
6640    unsigned NumValues = ValueVTs.size();
6641    for (unsigned Value = 0; Value != NumValues; ++Value) {
6642      EVT VT = ValueVTs[Value];
6643      EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
6644      unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
6645
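      // If the argument is used, reassemble it from its lowered parts, noting
      // any signext/zeroext parameter attribute via AssertSext/AssertZext.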
6646      if (!I->use_empty()) {
6647        ISD::NodeType AssertOp = ISD::DELETED_NODE;
6648        if (F.paramHasAttr(Idx, Attribute::SExt))
6649          AssertOp = ISD::AssertSext;
6650        else if (F.paramHasAttr(Idx, Attribute::ZExt))
6651          AssertOp = ISD::AssertZext;
6652
6653        ArgValues.push_back(getCopyFromParts(DAG, dl, 0, &InVals[i],
6654                                             NumParts, PartVT, VT,
6655                                             AssertOp));
6656      }
6657
6658      i += NumParts;
6659    }
6660
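    // If the argument is used, merge the reassembled values and remember the
    // mapping from the IR argument to its DAG value.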
6661    if (!I->use_empty()) {
6662      SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues,
6663                                       SDB->getCurDebugLoc());
6664      SDB->setValue(I, Res);
6665
6666      // If this argument is live outside of the entry block, insert a copy
6667      // from wherever we got it to the vreg that other BBs use to refer to it.
6668      SDB->CopyToExportRegsIfNeeded(I);
6669    }
6670  }
6671
6672  assert(i == InVals.size() && "Argument register count mismatch!");
6673
6674  // Finally, if the target has anything special to do, allow it to do so.
6675  // FIXME: this should insert code into the DAG!
6676  EmitFunctionEntryCode(F, SDB->DAG.getMachineFunction());
6677}
6678
6679/// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
6680/// ensure constants are generated when needed.  Remember the virtual registers
6681/// that need to be added to the Machine PHI nodes as input.  We cannot just
6682/// directly add them, because expansion might result in multiple MBB's for one
6683/// BB.  As such, the start of the BB might correspond to a different MBB than
6684/// the end.
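///
/// For example (illustrative IR only):
///
///   bb1:
///     br label %merge
///   bb2:
///     br label %merge
///   merge:
///     %p = phi i32 [ 7, %bb1 ], [ %x, %bb2 ]
///
/// When lowering bb1, the constant 7 must be copied into a virtual register
/// so that the machine PHI node created for %p can refer to it.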
6685///
6686void
6687SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
6688  TerminatorInst *TI = LLVMBB->getTerminator();
6689
6690  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
6691
6692  // Check successor blocks' PHI nodes that take an incoming value (commonly
6693  // a constant) from this block.
6694  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
6695    BasicBlock *SuccBB = TI->getSuccessor(succ);
6696    if (!isa<PHINode>(SuccBB->begin())) continue;
6697    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
6698
6699    // If this terminator has multiple identical successors (common for
6700    // switches), only handle each succ once.
6701    if (!SuccsHandled.insert(SuccMBB)) continue;
6702
6703    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
6704    PHINode *PN;
6705
6706    // At this point we know that there is a 1-1 correspondence between LLVM PHI
6707    // nodes and Machine PHI nodes, but the incoming operands have not been
6708    // emitted yet.
6709    for (BasicBlock::iterator I = SuccBB->begin();
6710         (PN = dyn_cast<PHINode>(I)); ++I) {
6711      // Ignore dead PHI nodes.
6712      if (PN->use_empty()) continue;
6713
6714      unsigned Reg;
6715      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6716
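      // Constant incoming values are materialized at most once per block;
      // ConstantsOut caches the virtual register holding each constant.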
6717      if (Constant *C = dyn_cast<Constant>(PHIOp)) {
6718        unsigned &RegOut = SDB->ConstantsOut[C];
6719        if (RegOut == 0) {
6720          RegOut = FuncInfo->CreateRegForValue(C);
6721          SDB->CopyValueToVirtualRegister(C, RegOut);
6722        }
6723        Reg = RegOut;
6724      } else {
6725        Reg = FuncInfo->ValueMap[PHIOp];
6726        if (Reg == 0) {
6727          assert(isa<AllocaInst>(PHIOp) &&
6728                 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
6729                 "Didn't codegen value into a register!??");
6730          Reg = FuncInfo->CreateRegForValue(PHIOp);
6731          SDB->CopyValueToVirtualRegister(PHIOp, Reg);
6732        }
6733      }
6734
6735      // Remember that this register needs to be added to the machine PHI node
6736      // as the input for this MBB.
6737      SmallVector<EVT, 4> ValueVTs;
6738      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
6739      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
6740        EVT VT = ValueVTs[vti];
6741        unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
6742        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
6743          SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
6744        Reg += NumRegisters;
6745      }
6746    }
6747  }
6748  SDB->ConstantsOut.clear();
6749}
6750
6751/// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
6752/// supports legal types, and it emits MachineInstrs directly instead of
6753/// creating SelectionDAG nodes.
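/// Returns true if every PHI input was handled; otherwise it restores
/// PHINodesToUpdate to its original size and returns false so the caller can
/// fall back to the SelectionDAG-based path above.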
6754///
6755bool
6756SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
6757                                                      FastISel *F) {
6758  TerminatorInst *TI = LLVMBB->getTerminator();
6759
6760  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
6761  unsigned OrigNumPHINodesToUpdate = SDB->PHINodesToUpdate.size();
6762
6763  // Check successor blocks' PHI nodes that take an incoming value (commonly
6764  // a constant) from this block.
6765  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
6766    BasicBlock *SuccBB = TI->getSuccessor(succ);
6767    if (!isa<PHINode>(SuccBB->begin())) continue;
6768    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
6769
6770    // If this terminator has multiple identical successors (common for
6771    // switches), only handle each succ once.
6772    if (!SuccsHandled.insert(SuccMBB)) continue;
6773
6774    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
6775    PHINode *PN;
6776
6777    // At this point we know that there is a 1-1 correspondence between LLVM PHI
6778    // nodes and Machine PHI nodes, but the incoming operands have not been
6779    // emitted yet.
6780    for (BasicBlock::iterator I = SuccBB->begin();
6781         (PN = dyn_cast<PHINode>(I)); ++I) {
6782      // Ignore dead phi's.
6783      if (PN->use_empty()) continue;
6784
6785      // Only handle legal types. Two interesting things to note here. First,
6786      // by bailing out early, we may leave behind some dead instructions,
6787      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
6788      // own moves. Second, this check is necessary because FastISel doesn't
6789      // use CreateRegForValue to create registers, so it always creates
6790      // exactly one register for each non-void instruction.
6791      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
6792      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
6793        // Promote MVT::i1.
6794        if (VT == MVT::i1)
6795          VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
6796        else {
6797          SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6798          return false;
6799        }
6800      }
6801
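      // FastISel must already have assigned a register to the incoming value;
      // if it has not, undo our updates and let SelectionDAG handle the block.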
6802      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6803
6804      unsigned Reg = F->getRegForValue(PHIOp);
6805      if (Reg == 0) {
6806        SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6807        return false;
6808      }
6809      SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
6810    }
6811  }
6812
6813  return true;
6814}
6815