SelectionDAGBuilder.cpp revision ca6c93430e98e3fb0eaf40aa543ea1dc45cf35d7
1//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements routines for translating from LLVM IR into SelectionDAG IR.
11//
12//===----------------------------------------------------------------------===//
13
14#define DEBUG_TYPE "isel"
15#include "SelectionDAGBuilder.h"
16#include "FunctionLoweringInfo.h"
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/SmallSet.h"
19#include "llvm/Analysis/AliasAnalysis.h"
20#include "llvm/Analysis/ConstantFolding.h"
21#include "llvm/Constants.h"
22#include "llvm/CallingConv.h"
23#include "llvm/DerivedTypes.h"
24#include "llvm/Function.h"
25#include "llvm/GlobalVariable.h"
26#include "llvm/InlineAsm.h"
27#include "llvm/Instructions.h"
28#include "llvm/Intrinsics.h"
29#include "llvm/IntrinsicInst.h"
30#include "llvm/Module.h"
31#include "llvm/CodeGen/FastISel.h"
32#include "llvm/CodeGen/GCStrategy.h"
33#include "llvm/CodeGen/GCMetadata.h"
34#include "llvm/CodeGen/MachineFunction.h"
35#include "llvm/CodeGen/MachineFrameInfo.h"
36#include "llvm/CodeGen/MachineInstrBuilder.h"
37#include "llvm/CodeGen/MachineJumpTableInfo.h"
38#include "llvm/CodeGen/MachineModuleInfo.h"
39#include "llvm/CodeGen/MachineRegisterInfo.h"
40#include "llvm/CodeGen/PseudoSourceValue.h"
41#include "llvm/CodeGen/SelectionDAG.h"
42#include "llvm/CodeGen/DwarfWriter.h"
43#include "llvm/Analysis/DebugInfo.h"
44#include "llvm/Target/TargetRegisterInfo.h"
45#include "llvm/Target/TargetData.h"
46#include "llvm/Target/TargetFrameInfo.h"
47#include "llvm/Target/TargetInstrInfo.h"
48#include "llvm/Target/TargetIntrinsicInfo.h"
49#include "llvm/Target/TargetLowering.h"
50#include "llvm/Target/TargetOptions.h"
51#include "llvm/Support/Compiler.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/MathExtras.h"
56#include "llvm/Support/raw_ostream.h"
57#include <algorithm>
58using namespace llvm;
59
60/// LimitFloatPrecision - Generate low-precision inline sequences for
61/// some float libcalls (6, 8 or 12 bits).
62static unsigned LimitFloatPrecision;
63
64static cl::opt<unsigned, true>
65LimitFPPrecision("limit-float-precision",
66                 cl::desc("Generate low-precision inline sequences "
67                          "for some float libcalls"),
68                 cl::location(LimitFloatPrecision),
69                 cl::init(0));
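// (Illustrative note: a nonzero value in the supported range makes the
// intrinsic visitors later in this file emit short polynomial approximations
// for f32 calls such as exp and log instead of full-precision libcalls; the
// default of 0 leaves those calls alone.)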
70
71namespace {
72  /// RegsForValue - This struct represents the registers (physical or virtual)
73  /// that a particular set of values is assigned, and the type information
74  /// about the value. The most common situation is to represent one value at a
75  /// time, but struct or array values are handled element-wise as multiple
76  /// values.  The splitting of aggregates is performed recursively, so that we
77  /// never have aggregate-typed registers. The values at this point do not
78  /// necessarily have legal types, so each value may require one or more
79  /// registers of some legal type.
80  ///
81  struct RegsForValue {
82    /// TLI - The TargetLowering object.
83    ///
84    const TargetLowering *TLI;
85
86    /// ValueVTs - The value types of the values, which may not be legal, and
87    /// may need to be promoted or synthesized from one or more registers.
88    ///
89    SmallVector<EVT, 4> ValueVTs;
90
91    /// RegVTs - The value types of the registers. This is the same size as
92    /// ValueVTs and it records, for each value, what the type of the assigned
93    /// register or registers are. (Individual values are never synthesized
94    /// from more than one type of register.)
95    ///
96    /// With virtual registers, the contents of RegVTs are redundant with TLI's
97    /// getRegisterType member function.  However, with physical registers
98    /// it is necessary to have a separate record of the types.
99    ///
100    SmallVector<EVT, 4> RegVTs;
101
102    /// Regs - This list holds the registers assigned to the values.
103    /// Each legal or promoted value requires one register, and each
104    /// expanded value requires multiple registers.
105    ///
106    SmallVector<unsigned, 4> Regs;
107
108    RegsForValue() : TLI(0) {}
109
110    RegsForValue(const TargetLowering &tli,
111                 const SmallVector<unsigned, 4> &regs,
112                 EVT regvt, EVT valuevt)
113      : TLI(&tli),  ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
114    RegsForValue(const TargetLowering &tli,
115                 const SmallVector<unsigned, 4> &regs,
116                 const SmallVector<EVT, 4> &regvts,
117                 const SmallVector<EVT, 4> &valuevts)
118      : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
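    // (Illustrative example: on a target where i64 is legalized to a pair of
    // i32 registers, constructing RegsForValue for an i64 value whose first
    // virtual register is R yields ValueVTs == {i64}, RegVTs == {i32} and
    // Regs == {R, R+1}.)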
119    RegsForValue(LLVMContext &Context, const TargetLowering &tli,
120                 unsigned Reg, const Type *Ty) : TLI(&tli) {
121      ComputeValueVTs(tli, Ty, ValueVTs);
122
123      for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
124        EVT ValueVT = ValueVTs[Value];
125        unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
126        EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
127        for (unsigned i = 0; i != NumRegs; ++i)
128          Regs.push_back(Reg + i);
129        RegVTs.push_back(RegisterVT);
130        Reg += NumRegs;
131      }
132    }
133
134    /// append - Add the specified values to this one.
135    void append(const RegsForValue &RHS) {
136      TLI = RHS.TLI;
137      ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
138      RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
139      Regs.append(RHS.Regs.begin(), RHS.Regs.end());
140    }
141
142
143    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from
144    /// this value and return the result as a ValueVTs value.  This uses
145    /// Chain/Flag as the input and updates them for the output Chain/Flag.
146    /// If the Flag pointer is NULL, no flag is used.
147    SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, unsigned Order,
148                            SDValue &Chain, SDValue *Flag) const;
149
150    /// getCopyToRegs - Emit a series of CopyToReg nodes that copy the
151    /// specified value into the registers specified by this object.  This uses
152    /// Chain/Flag as the input and updates them for the output Chain/Flag.
153    /// If the Flag pointer is NULL, no flag is used.
154    void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
155                       unsigned Order, SDValue &Chain, SDValue *Flag) const;
156
157    /// AddInlineAsmOperands - Add this value to the specified inlineasm node
158    /// operand list.  This adds the code marker, matching input operand index
159    /// (if applicable), and includes the number of values added into it.
160    void AddInlineAsmOperands(unsigned Code,
161                              bool HasMatching, unsigned MatchingIdx,
162                              SelectionDAG &DAG, unsigned Order,
163                              std::vector<SDValue> &Ops) const;
164  };
165}
166
167/// getCopyFromParts - Create a value that contains the specified legal parts
168/// combined into the value they represent.  If the parts combine to a type
169/// larger than ValueVT, then AssertOp can be used to specify whether the extra
170/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
171/// (ISD::AssertSext).
172static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, unsigned Order,
173                                const SDValue *Parts,
174                                unsigned NumParts, EVT PartVT, EVT ValueVT,
175                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
176  assert(NumParts > 0 && "No parts to assemble!");
177  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
178  SDValue Val = Parts[0];
179
180  if (NumParts > 1) {
181    // Assemble the value from multiple parts.
182    if (!ValueVT.isVector() && ValueVT.isInteger()) {
183      unsigned PartBits = PartVT.getSizeInBits();
184      unsigned ValueBits = ValueVT.getSizeInBits();
185
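      // Little-endian view of the strategy: pairwise-combine the largest
      // power-of-2 run of parts into a "round" value first; if NumParts is
      // not a power of 2, the leftover "odd" parts are extended, shifted into
      // the high bits and OR'd in afterwards.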
186      // Assemble the power of 2 part.
187      unsigned RoundParts = NumParts & (NumParts - 1) ?
188        1 << Log2_32(NumParts) : NumParts;
189      unsigned RoundBits = PartBits * RoundParts;
190      EVT RoundVT = RoundBits == ValueBits ?
191        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
192      SDValue Lo, Hi;
193
194      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
195
196      if (RoundParts > 2) {
197        Lo = getCopyFromParts(DAG, dl, Order, Parts, RoundParts / 2,
198                              PartVT, HalfVT);
199        Hi = getCopyFromParts(DAG, dl, Order, Parts + RoundParts / 2,
200                              RoundParts / 2, PartVT, HalfVT);
201      } else {
202        Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
203        Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
204      }
205
206      if (TLI.isBigEndian())
207        std::swap(Lo, Hi);
208
209      Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
210
211      if (RoundParts < NumParts) {
212        // Assemble the trailing non-power-of-2 part.
213        unsigned OddParts = NumParts - RoundParts;
214        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
215        Hi = getCopyFromParts(DAG, dl, Order,
216                              Parts + RoundParts, OddParts, PartVT, OddVT);
217
218        // Combine the round and odd parts.
219        Lo = Val;
220        if (TLI.isBigEndian())
221          std::swap(Lo, Hi);
222        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
223        Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
224        Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
225                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
226                                         TLI.getPointerTy()));
227        Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
228        Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
229      }
230    } else if (ValueVT.isVector()) {
231      // Handle a multi-element vector.
232      EVT IntermediateVT, RegisterVT;
233      unsigned NumIntermediates;
234      unsigned NumRegs =
235        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
236                                   NumIntermediates, RegisterVT);
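      // (Illustrative example: on a target where v4i32 is legal, a v8i32
      // value breaks down into NumIntermediates == 2 v4i32 intermediates,
      // each a single register, so NumRegs == NumParts == 2.)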
237      assert(NumRegs == NumParts
238             && "Part count doesn't match vector breakdown!");
239      NumParts = NumRegs; // Silence a compiler warning.
240      assert(RegisterVT == PartVT
241             && "Part type doesn't match vector breakdown!");
242      assert(RegisterVT == Parts[0].getValueType() &&
243             "Part type doesn't match part!");
244
245      // Assemble the parts into intermediate operands.
246      SmallVector<SDValue, 8> Ops(NumIntermediates);
247      if (NumIntermediates == NumParts) {
248        // If the register was not expanded, truncate or copy the value,
249        // as appropriate.
250        for (unsigned i = 0; i != NumParts; ++i)
251          Ops[i] = getCopyFromParts(DAG, dl, Order, &Parts[i], 1,
252                                    PartVT, IntermediateVT);
253      } else if (NumParts > 0) {
254        // If the intermediate type was expanded, build the intermediate
255        // operands from the parts.
256        assert(NumParts % NumIntermediates == 0 &&
257               "Must expand into a divisible number of parts!");
258        unsigned Factor = NumParts / NumIntermediates;
259        for (unsigned i = 0; i != NumIntermediates; ++i)
260          Ops[i] = getCopyFromParts(DAG, dl, Order, &Parts[i * Factor], Factor,
261                                    PartVT, IntermediateVT);
262      }
263
264      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
265      // intermediate operands.
266      Val = DAG.getNode(IntermediateVT.isVector() ?
267                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
268                        ValueVT, &Ops[0], NumIntermediates);
269    } else if (PartVT.isFloatingPoint()) {
270      // FP split into multiple FP parts (for ppcf128)
271      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
272             "Unexpected split");
273      SDValue Lo, Hi;
274      Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
275      Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
276      if (TLI.isBigEndian())
277        std::swap(Lo, Hi);
278      Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
279    } else {
280      // FP split into integer parts (soft fp)
281      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
282             !PartVT.isVector() && "Unexpected split");
283      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
284      Val = getCopyFromParts(DAG, dl, Order, Parts, NumParts, PartVT, IntVT);
285    }
286  }
287
288  // There is now one part, held in Val.  Correct it to match ValueVT.
289  PartVT = Val.getValueType();
290
291  if (PartVT == ValueVT)
292    return Val;
293
294  if (PartVT.isVector()) {
295    assert(ValueVT.isVector() && "Unknown vector conversion!");
296    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
297  }
298
299  if (ValueVT.isVector()) {
300    assert(ValueVT.getVectorElementType() == PartVT &&
301           ValueVT.getVectorNumElements() == 1 &&
302           "Only trivial scalar-to-vector conversions should get here!");
303    return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
304  }
305
306  if (PartVT.isInteger() &&
307      ValueVT.isInteger()) {
308    if (ValueVT.bitsLT(PartVT)) {
309      // For a truncate, see if we have any information to
310      // indicate whether the truncated bits are known to be
311      // zero or sign-extended.
312      if (AssertOp != ISD::DELETED_NODE)
313        Val = DAG.getNode(AssertOp, dl, PartVT, Val,
314                          DAG.getValueType(ValueVT));
315      return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
316    } else {
317      return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
318    }
319  }
320
321  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
322    if (ValueVT.bitsLT(Val.getValueType())) {
323      // FP_ROUND's are always exact here.
324      return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
325                         DAG.getIntPtrConstant(1));
326    }
327
328    return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
329  }
330
331  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
332    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
333
334  llvm_unreachable("Unknown mismatch!");
335  return SDValue();
336}
337
338/// getCopyToParts - Create a series of nodes that contain the specified value
339/// split into legal parts.  If the parts contain more bits than Val, then, for
340/// integers, ExtendKind can be used to specify how to generate the extra bits.
341static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, unsigned Order,
342                           SDValue Val, SDValue *Parts, unsigned NumParts,
343                           EVT PartVT,
344                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
345  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
346  EVT PtrVT = TLI.getPointerTy();
347  EVT ValueVT = Val.getValueType();
348  unsigned PartBits = PartVT.getSizeInBits();
349  unsigned OrigNumParts = NumParts;
350  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
351
352  if (!NumParts)
353    return;
354
355  if (!ValueVT.isVector()) {
356    if (PartVT == ValueVT) {
357      assert(NumParts == 1 && "No-op copy with multiple parts!");
358      Parts[0] = Val;
359      return;
360    }
361
362    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
363      // If the parts cover more bits than the value has, promote the value.
364      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
365        assert(NumParts == 1 && "Do not know what to promote to!");
366        Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
367      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
368        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
369        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
370      } else {
371        llvm_unreachable("Unknown mismatch!");
372      }
373    } else if (PartBits == ValueVT.getSizeInBits()) {
374      // Different types of the same size.
375      assert(NumParts == 1 && PartVT != ValueVT);
376      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
377    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
378      // If the parts cover fewer bits than the value has, truncate the value.
379      if (PartVT.isInteger() && ValueVT.isInteger()) {
380        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
381        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
382      } else {
383        llvm_unreachable("Unknown mismatch!");
384      }
385    }
386
387    // The value may have changed - recompute ValueVT.
388    ValueVT = Val.getValueType();
389    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
390           "Failed to tile the value with PartVT!");
391
392    if (NumParts == 1) {
393      assert(PartVT == ValueVT && "Type conversion failed!");
394      Parts[0] = Val;
395      return;
396    }
397
398    // Expand the value into multiple parts.
399    if (NumParts & (NumParts - 1)) {
400      // The number of parts is not a power of 2.  Split off and copy the tail.
401      assert(PartVT.isInteger() && ValueVT.isInteger() &&
402             "Do not know what to expand to!");
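      // (Example, little-endian: with NumParts == 3, RoundParts == 2; the
      // single odd part is shifted out and copied recursively, the value is
      // truncated to RoundBits, and the remaining power-of-2 parts are
      // produced by the bisection loop below.)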
403      unsigned RoundParts = 1 << Log2_32(NumParts);
404      unsigned RoundBits = RoundParts * PartBits;
405      unsigned OddParts = NumParts - RoundParts;
406      SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
407                                   DAG.getConstant(RoundBits,
408                                                   TLI.getPointerTy()));
409      getCopyToParts(DAG, dl, Order, OddVal, Parts + RoundParts,
410                     OddParts, PartVT);
411
412      if (TLI.isBigEndian())
413        // The odd parts were reversed by getCopyToParts - unreverse them.
414        std::reverse(Parts + RoundParts, Parts + NumParts);
415
416      NumParts = RoundParts;
417      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
418      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
419    }
420
421    // The number of parts is a power of 2.  Repeatedly bisect the value using
422    // EXTRACT_ELEMENT.
423    Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
424                           EVT::getIntegerVT(*DAG.getContext(),
425                                             ValueVT.getSizeInBits()),
426                           Val);
427
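    // Each pass of the loop halves StepSize, splitting every piece assembled
    // so far into its low and high halves with EXTRACT_ELEMENT.  (E.g. for
    // NumParts == 4, the first pass fills Parts[0] and Parts[2] with the two
    // halves of the value, and the second pass splits each of those again,
    // leaving Parts[0..3] in little-endian order; the std::reverse below
    // handles big-endian targets.)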
428    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
429      for (unsigned i = 0; i < NumParts; i += StepSize) {
430        unsigned ThisBits = StepSize * PartBits / 2;
431        EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
432        SDValue &Part0 = Parts[i];
433        SDValue &Part1 = Parts[i+StepSize/2];
434
435        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
436                            ThisVT, Part0,
437                            DAG.getConstant(1, PtrVT));
438        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
439                            ThisVT, Part0,
440                            DAG.getConstant(0, PtrVT));
441
442        if (ThisBits == PartBits && ThisVT != PartVT) {
443          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
444                              PartVT, Part0);
445          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
446                              PartVT, Part1);
447        }
448      }
449    }
450
451    if (TLI.isBigEndian())
452      std::reverse(Parts, Parts + OrigNumParts);
453
454    return;
455  }
456
457  // Vector ValueVT.
458  if (NumParts == 1) {
459    if (PartVT != ValueVT) {
460      if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
461        Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
462      } else {
463        assert(ValueVT.getVectorElementType() == PartVT &&
464               ValueVT.getVectorNumElements() == 1 &&
465               "Only trivial vector-to-scalar conversions should get here!");
466        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
467                          PartVT, Val,
468                          DAG.getConstant(0, PtrVT));
469      }
470    }
471
472    Parts[0] = Val;
473    return;
474  }
475
476  // Handle a multi-element vector.
477  EVT IntermediateVT, RegisterVT;
478  unsigned NumIntermediates;
479  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
480                              IntermediateVT, NumIntermediates, RegisterVT);
481  unsigned NumElements = ValueVT.getVectorNumElements();
482
483  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
484  NumParts = NumRegs; // Silence a compiler warning.
485  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
486
487  // Split the vector into intermediate operands.
488  SmallVector<SDValue, 8> Ops(NumIntermediates);
489  for (unsigned i = 0; i != NumIntermediates; ++i) {
490    if (IntermediateVT.isVector())
491      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
492                           IntermediateVT, Val,
493                           DAG.getConstant(i * (NumElements / NumIntermediates),
494                                           PtrVT));
495    else
496      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
497                           IntermediateVT, Val,
498                           DAG.getConstant(i, PtrVT));
499  }
500
501  // Split the intermediate operands into legal parts.
502  if (NumParts == NumIntermediates) {
503    // If the register was not expanded, promote or copy the value,
504    // as appropriate.
505    for (unsigned i = 0; i != NumParts; ++i)
506      getCopyToParts(DAG, dl, Order, Ops[i], &Parts[i], 1, PartVT);
507  } else if (NumParts > 0) {
508    // If the intermediate type was expanded, split each intermediate value
509    // into legal parts.
510    assert(NumParts % NumIntermediates == 0 &&
511           "Must expand into a divisible number of parts!");
512    unsigned Factor = NumParts / NumIntermediates;
513    for (unsigned i = 0; i != NumIntermediates; ++i)
514      getCopyToParts(DAG, dl, Order, Ops[i], &Parts[i*Factor], Factor, PartVT);
515  }
516}
517
518
519void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
520  AA = &aa;
521  GFI = gfi;
522  TD = DAG.getTarget().getTargetData();
523}
524
525/// clear - Clear out the current SelectionDAG and the associated
526/// state and prepare this SelectionDAGBuilder object to be used
527/// for a new block. This doesn't clear out information about
528/// additional blocks that are needed to complete switch lowering
529/// or PHI node updating; that information is cleared out as it is
530/// consumed.
531void SelectionDAGBuilder::clear() {
532  NodeMap.clear();
533  PendingLoads.clear();
534  PendingExports.clear();
535  EdgeMapping.clear();
536  DAG.clear();
537  CurDebugLoc = DebugLoc::getUnknownLoc();
538  HasTailCall = false;
539}
540
541/// getRoot - Return the current virtual root of the Selection DAG,
542/// flushing any PendingLoad items. This must be done before emitting
543/// a store or any other node that may need to be ordered after any
544/// prior load instructions.
545///
546SDValue SelectionDAGBuilder::getRoot() {
547  if (PendingLoads.empty())
548    return DAG.getRoot();
549
550  if (PendingLoads.size() == 1) {
551    SDValue Root = PendingLoads[0];
552    DAG.setRoot(Root);
553    PendingLoads.clear();
554    return Root;
555  }
556
557  // Otherwise, we have to make a token factor node.
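  // (TokenFactor takes any number of chains and produces a single chain that
  // is ordered after all of them; it imposes no ordering among the pending
  // loads themselves, so independent loads remain free to be scheduled.)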
558  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
559                               &PendingLoads[0], PendingLoads.size());
560  PendingLoads.clear();
561  DAG.setRoot(Root);
562  return Root;
563}
564
565/// getControlRoot - Similar to getRoot, but instead of flushing all the
566/// PendingLoad items, flush all the PendingExports items. It is necessary
567/// to do this before emitting a terminator instruction.
568///
569SDValue SelectionDAGBuilder::getControlRoot() {
570  SDValue Root = DAG.getRoot();
571
572  if (PendingExports.empty())
573    return Root;
574
575  // Turn all of the CopyToReg chains into one factored node.
576  if (Root.getOpcode() != ISD::EntryToken) {
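  // (If the current root is not the entry token, it is added as an extra
  // operand below unless one of the pending exports already chains off it,
  // which would make the extra dependence redundant.)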
577    unsigned i = 0, e = PendingExports.size();
578    for (; i != e; ++i) {
579      assert(PendingExports[i].getNode()->getNumOperands() > 1);
580      if (PendingExports[i].getNode()->getOperand(0) == Root)
581        break;  // Don't add the root if we already indirectly depend on it.
582    }
583
584    if (i == e)
585      PendingExports.push_back(Root);
586  }
587
588  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
589                     &PendingExports[0],
590                     PendingExports.size());
591  PendingExports.clear();
592  DAG.setRoot(Root);
593  return Root;
594}
595
596void SelectionDAGBuilder::AssignOrderingToNode(const SDNode *Node) {
597  if (DAG.GetOrdering(Node) != 0) return; // Already has ordering.
598  DAG.AssignOrdering(Node, SDNodeOrder);
599
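  // Recurse over the operands so that any nodes created implicitly while
  // visiting the instruction (constants, chains, etc.) receive the same
  // ordering; the early return above prevents revisiting nodes that were
  // already ordered.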
600  for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I)
601    AssignOrderingToNode(Node->getOperand(I).getNode());
602}
603
604void SelectionDAGBuilder::visit(Instruction &I) {
605  visit(I.getOpcode(), I);
606}
607
608void SelectionDAGBuilder::visit(unsigned Opcode, User &I) {
609  // Note: this doesn't use InstVisitor, because it has to work with
610  // ConstantExpr's in addition to instructions.
611  switch (Opcode) {
612  default: llvm_unreachable("Unknown instruction type encountered!");
613    // Build the switch statement using the Instruction.def file.
614#define HANDLE_INST(NUM, OPCODE, CLASS) \
615    case Instruction::OPCODE: visit##OPCODE((CLASS&)I); break;
616#include "llvm/Instruction.def"
617  }
618
619  // Assign the ordering to the freshly created DAG nodes.
620  if (NodeMap.count(&I)) {
621    ++SDNodeOrder;
622    AssignOrderingToNode(getValue(&I).getNode());
623  }
624}
625
626SDValue SelectionDAGBuilder::getValue(const Value *V) {
627  SDValue &N = NodeMap[V];
628  if (N.getNode()) return N;
629
630  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
631    EVT VT = TLI.getValueType(V->getType(), true);
632
633    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
634      return N = DAG.getConstant(*CI, VT);
635
636    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
637      return N = DAG.getGlobalAddress(GV, VT);
638
639    if (isa<ConstantPointerNull>(C))
640      return N = DAG.getConstant(0, TLI.getPointerTy());
641
642    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
643      return N = DAG.getConstantFP(*CFP, VT);
644
645    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
646      return N = DAG.getUNDEF(VT);
647
648    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
649      visit(CE->getOpcode(), *CE);
650      SDValue N1 = NodeMap[V];
651      assert(N1.getNode() && "visit didn't populate the ValueMap!");
652      return N1;
653    }
654
655    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
656      SmallVector<SDValue, 4> Constants;
657      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
658           OI != OE; ++OI) {
659        SDNode *Val = getValue(*OI).getNode();
660        // If the operand is an empty aggregate, there are no values.
661        if (!Val) continue;
662        // Add each leaf value from the operand to the Constants list
663        // to form a flattened list of all the values.
664        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
665          Constants.push_back(SDValue(Val, i));
666      }
667
668      return DAG.getMergeValues(&Constants[0], Constants.size(),
669                                getCurDebugLoc());
670    }
671
672    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
673      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
674             "Unknown struct or array constant!");
675
676      SmallVector<EVT, 4> ValueVTs;
677      ComputeValueVTs(TLI, C->getType(), ValueVTs);
678      unsigned NumElts = ValueVTs.size();
679      if (NumElts == 0)
680        return SDValue(); // empty struct
681      SmallVector<SDValue, 4> Constants(NumElts);
682      for (unsigned i = 0; i != NumElts; ++i) {
683        EVT EltVT = ValueVTs[i];
684        if (isa<UndefValue>(C))
685          Constants[i] = DAG.getUNDEF(EltVT);
686        else if (EltVT.isFloatingPoint())
687          Constants[i] = DAG.getConstantFP(0, EltVT);
688        else
689          Constants[i] = DAG.getConstant(0, EltVT);
690      }
691
692      return DAG.getMergeValues(&Constants[0], NumElts,
693                                getCurDebugLoc());
694    }
695
696    if (BlockAddress *BA = dyn_cast<BlockAddress>(C))
697      return DAG.getBlockAddress(BA, VT);
698
699    const VectorType *VecTy = cast<VectorType>(V->getType());
700    unsigned NumElements = VecTy->getNumElements();
701
702    // Now that we know the number and type of the elements, get that number of
703    // elements into the Ops array based on what kind of constant it is.
704    SmallVector<SDValue, 16> Ops;
705    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
706      for (unsigned i = 0; i != NumElements; ++i)
707        Ops.push_back(getValue(CP->getOperand(i)));
708    } else {
709      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
710      EVT EltVT = TLI.getValueType(VecTy->getElementType());
711
712      SDValue Op;
713      if (EltVT.isFloatingPoint())
714        Op = DAG.getConstantFP(0, EltVT);
715      else
716        Op = DAG.getConstant(0, EltVT);
717      Ops.assign(NumElements, Op);
718    }
719
720    // Create a BUILD_VECTOR node.
721    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
722                                    VT, &Ops[0], Ops.size());
723  }
724
725  // If this is a static alloca, generate it as the frameindex instead of
726  // computation.
727  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
728    DenseMap<const AllocaInst*, int>::iterator SI =
729      FuncInfo.StaticAllocaMap.find(AI);
730    if (SI != FuncInfo.StaticAllocaMap.end())
731      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
732  }
733
734  unsigned InReg = FuncInfo.ValueMap[V];
735  assert(InReg && "Value not in map!");
736
737  RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
738  SDValue Chain = DAG.getEntryNode();
739  return RFV.getCopyFromRegs(DAG, getCurDebugLoc(),
740                             SDNodeOrder, Chain, NULL);
741}
742
743/// Get the EVTs and ArgFlags collections that represent the legalized return
744/// type of the given function.  This does not require a DAG or a return value,
745/// and is suitable for use before any DAGs for the function are constructed.
746static void getReturnInfo(const Type* ReturnType,
747                   Attributes attr, SmallVectorImpl<EVT> &OutVTs,
748                   SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
749                   TargetLowering &TLI,
750                   SmallVectorImpl<uint64_t> *Offsets = 0) {
751  SmallVector<EVT, 4> ValueVTs;
752  ComputeValueVTs(TLI, ReturnType, ValueVTs);
753  unsigned NumValues = ValueVTs.size();
754  if (NumValues == 0) return;
755  unsigned Offset = 0;
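  // (If requested, Offsets records the byte offset of each legalized part
  // within the flattened return value; callers that demote the return value
  // to an sret pointer use these offsets to address the individual parts.)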
756
757  for (unsigned j = 0, f = NumValues; j != f; ++j) {
758    EVT VT = ValueVTs[j];
759    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
760
761    if (attr & Attribute::SExt)
762      ExtendKind = ISD::SIGN_EXTEND;
763    else if (attr & Attribute::ZExt)
764      ExtendKind = ISD::ZERO_EXTEND;
765
766    // FIXME: C calling convention requires the return type to be promoted to
767    // at least 32-bit. But this is not necessary for non-C calling
768    // conventions. The frontend should mark functions whose return values
769    // require promoting with signext or zeroext attributes.
770    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
771      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
772      if (VT.bitsLT(MinVT))
773        VT = MinVT;
774    }
775
776    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
777    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
778    unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
779                        PartVT.getTypeForEVT(ReturnType->getContext()));
780
781    // 'inreg' on function refers to return value
782    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
783    if (attr & Attribute::InReg)
784      Flags.setInReg();
785
786    // Propagate extension type if any
787    if (attr & Attribute::SExt)
788      Flags.setSExt();
789    else if (attr & Attribute::ZExt)
790      Flags.setZExt();
791
792    for (unsigned i = 0; i < NumParts; ++i) {
793      OutVTs.push_back(PartVT);
794      OutFlags.push_back(Flags);
795      if (Offsets) {
797        Offsets->push_back(Offset);
798        Offset += PartSize;
799      }
800    }
801  }
802}
803
804void SelectionDAGBuilder::visitRet(ReturnInst &I) {
805  SDValue Chain = getControlRoot();
806  SmallVector<ISD::OutputArg, 8> Outs;
807  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
808
809  if (!FLI.CanLowerReturn) {
810    unsigned DemoteReg = FLI.DemoteRegister;
811    const Function *F = I.getParent()->getParent();
812
813    // Emit a store of the return value through the virtual register.
814    // Leave Outs empty so that LowerReturn won't try to load return
815    // registers the usual way.
816    SmallVector<EVT, 1> PtrValueVTs;
817    ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
818                    PtrValueVTs);
819
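    // (Sketch of the demoted-return path: DemoteReg holds the incoming
    // hidden return-value pointer, each element of the aggregate is stored
    // through it at its computed offset, and LowerReturn is then called with
    // an empty Outs list so it does not try to use return registers.)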
820    SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
821    SDValue RetOp = getValue(I.getOperand(0));
822
823    SmallVector<EVT, 4> ValueVTs;
824    SmallVector<uint64_t, 4> Offsets;
825    ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
826    unsigned NumValues = ValueVTs.size();
827
828    SmallVector<SDValue, 4> Chains(NumValues);
829    EVT PtrVT = PtrValueVTs[0];
830    for (unsigned i = 0; i != NumValues; ++i) {
831      SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, RetPtr,
832                                DAG.getConstant(Offsets[i], PtrVT));
833      Chains[i] =
834        DAG.getStore(Chain, getCurDebugLoc(),
835                     SDValue(RetOp.getNode(), RetOp.getResNo() + i),
836                     Add, NULL, Offsets[i], false, 0);
837    }
838
839    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
840                        MVT::Other, &Chains[0], NumValues);
841  } else {
842    for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
843      SmallVector<EVT, 4> ValueVTs;
844      ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
845      unsigned NumValues = ValueVTs.size();
846      if (NumValues == 0) continue;
847
848      SDValue RetOp = getValue(I.getOperand(i));
849      for (unsigned j = 0, f = NumValues; j != f; ++j) {
850        EVT VT = ValueVTs[j];
851
852        ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
853
854        const Function *F = I.getParent()->getParent();
855        if (F->paramHasAttr(0, Attribute::SExt))
856          ExtendKind = ISD::SIGN_EXTEND;
857        else if (F->paramHasAttr(0, Attribute::ZExt))
858          ExtendKind = ISD::ZERO_EXTEND;
859
860        // FIXME: C calling convention requires the return type to be promoted
861        // to at least 32-bit. But this is not necessary for non-C calling
862        // conventions. The frontend should mark functions whose return values
863        // require promoting with signext or zeroext attributes.
864        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
865          EVT MinVT = TLI.getRegisterType(*DAG.getContext(), MVT::i32);
866          if (VT.bitsLT(MinVT))
867            VT = MinVT;
868        }
869
870        unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
871        EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
872        SmallVector<SDValue, 4> Parts(NumParts);
873        getCopyToParts(DAG, getCurDebugLoc(), SDNodeOrder,
874                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
875                       &Parts[0], NumParts, PartVT, ExtendKind);
876
877        // 'inreg' on function refers to return value
878        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
879        if (F->paramHasAttr(0, Attribute::InReg))
880          Flags.setInReg();
881
882        // Propagate extension type if any
883        if (F->paramHasAttr(0, Attribute::SExt))
884          Flags.setSExt();
885        else if (F->paramHasAttr(0, Attribute::ZExt))
886          Flags.setZExt();
887
888        for (unsigned i = 0; i < NumParts; ++i)
889          Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
890      }
891    }
892  }
893
894  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
895  CallingConv::ID CallConv =
896    DAG.getMachineFunction().getFunction()->getCallingConv();
897  Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
898                          Outs, getCurDebugLoc(), DAG);
899
900  // Verify that the target's LowerReturn behaved as expected.
901  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
902         "LowerReturn didn't return a valid chain!");
903
904  // Update the DAG with the new chain value resulting from return lowering.
905  DAG.setRoot(Chain);
906}
907
908/// CopyToExportRegsIfNeeded - If the given value has virtual registers
909/// created for it, emit nodes to copy the value into the virtual
910/// registers.
911void SelectionDAGBuilder::CopyToExportRegsIfNeeded(Value *V) {
912  if (!V->use_empty()) {
913    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
914    if (VMI != FuncInfo.ValueMap.end())
915      CopyValueToVirtualRegister(V, VMI->second);
916  }
917}
918
919/// ExportFromCurrentBlock - If this condition isn't known to be exported from
920/// the current basic block, add it to ValueMap now so that we'll get a
921/// CopyTo/FromReg.
922void SelectionDAGBuilder::ExportFromCurrentBlock(Value *V) {
923  // No need to export constants.
924  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
925
926  // Already exported?
927  if (FuncInfo.isExportedInst(V)) return;
928
929  unsigned Reg = FuncInfo.InitializeRegForValue(V);
930  CopyValueToVirtualRegister(V, Reg);
931}
932
933bool SelectionDAGBuilder::isExportableFromCurrentBlock(Value *V,
934                                                     const BasicBlock *FromBB) {
935  // The operands of the setcc have to be in this block.  We don't know
936  // how to export them from some other block.
937  if (Instruction *VI = dyn_cast<Instruction>(V)) {
938    // Can export from current BB.
939    if (VI->getParent() == FromBB)
940      return true;
941
942    // Is already exported, noop.
943    return FuncInfo.isExportedInst(V);
944  }
945
946  // If this is an argument, we can export it if the BB is the entry block or
947  // if it is already exported.
948  if (isa<Argument>(V)) {
949    if (FromBB == &FromBB->getParent()->getEntryBlock())
950      return true;
951
952    // Otherwise, can only export this if it is already exported.
953    return FuncInfo.isExportedInst(V);
954  }
955
956  // Otherwise, constants can always be exported.
957  return true;
958}
959
960static bool InBlock(const Value *V, const BasicBlock *BB) {
961  if (const Instruction *I = dyn_cast<Instruction>(V))
962    return I->getParent() == BB;
963  return true;
964}
965
966/// getFCmpCondCode - Return the ISD condition code corresponding to
967/// the given LLVM IR floating-point condition code.  This includes
968/// consideration of global floating-point math flags.
969///
970static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
971  ISD::CondCode FPC, FOC;
972  switch (Pred) {
973  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
974  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
975  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
976  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
977  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
978  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
979  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
980  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
981  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
982  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
983  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
984  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
985  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
986  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
987  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
988  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
989  default:
990    llvm_unreachable("Invalid FCmp predicate opcode!");
991    FOC = FPC = ISD::SETFALSE;
992    break;
993  }
994  if (FiniteOnlyFPMath())
995    return FOC;
996  else
997    return FPC;
998}
999
1000/// getICmpCondCode - Return the ISD condition code corresponding to
1001/// the given LLVM IR integer condition code.
1002///
1003static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
1004  switch (Pred) {
1005  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
1006  case ICmpInst::ICMP_NE:  return ISD::SETNE;
1007  case ICmpInst::ICMP_SLE: return ISD::SETLE;
1008  case ICmpInst::ICMP_ULE: return ISD::SETULE;
1009  case ICmpInst::ICMP_SGE: return ISD::SETGE;
1010  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
1011  case ICmpInst::ICMP_SLT: return ISD::SETLT;
1012  case ICmpInst::ICMP_ULT: return ISD::SETULT;
1013  case ICmpInst::ICMP_SGT: return ISD::SETGT;
1014  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
1015  default:
1016    llvm_unreachable("Invalid ICmp predicate opcode!");
1017    return ISD::SETNE;
1018  }
1019}
1020
1021/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1022/// This function emits a branch and is used at the leaves of an OR or an
1023/// AND operator tree.
1024///
1025void
1026SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
1027                                                  MachineBasicBlock *TBB,
1028                                                  MachineBasicBlock *FBB,
1029                                                  MachineBasicBlock *CurBB) {
1030  const BasicBlock *BB = CurBB->getBasicBlock();
1031
1032  // If the leaf of the tree is a comparison, merge the condition into
1033  // the caseblock.
1034  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1035    // The operands of the cmp have to be in this block.  We don't know
1036    // how to export them from some other block.  If this is the first block
1037    // of the sequence, no exporting is needed.
1038    if (CurBB == CurMBB ||
1039        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1040         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1041      ISD::CondCode Condition;
1042      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1043        Condition = getICmpCondCode(IC->getPredicate());
1044      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1045        Condition = getFCmpCondCode(FC->getPredicate());
1046      } else {
1047        Condition = ISD::SETEQ; // silence warning.
1048        llvm_unreachable("Unknown compare instruction");
1049      }
1050
1051      CaseBlock CB(Condition, BOp->getOperand(0),
1052                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
1053      SwitchCases.push_back(CB);
1054      return;
1055    }
1056  }
1057
1058  // Create a CaseBlock record representing this branch.
1059  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
1060               NULL, TBB, FBB, CurBB);
1061  SwitchCases.push_back(CB);
1062}
1063
1064/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// lower the branch on it as a sequence of conditional branches, emitting each
/// leaf of the and/or tree via EmitBranchForMergedCondition.
1065void SelectionDAGBuilder::FindMergedConditions(Value *Cond,
1066                                               MachineBasicBlock *TBB,
1067                                               MachineBasicBlock *FBB,
1068                                               MachineBasicBlock *CurBB,
1069                                               unsigned Opc) {
1070  // If this node is not part of the or/and tree, emit it as a branch.
1071  Instruction *BOp = dyn_cast<Instruction>(Cond);
1072  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1073      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1074      BOp->getParent() != CurBB->getBasicBlock() ||
1075      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1076      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1077    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
1078    return;
1079  }
1080
1081  //  Create TmpBB after CurBB.
1082  MachineFunction::iterator BBI = CurBB;
1083  MachineFunction &MF = DAG.getMachineFunction();
1084  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1085  CurBB->getParent()->insert(++BBI, TmpBB);
1086
1087  if (Opc == Instruction::Or) {
1088    // Codegen X | Y as:
1089    //   jmp_if_X TBB
1090    //   jmp TmpBB
1091    // TmpBB:
1092    //   jmp_if_Y TBB
1093    //   jmp FBB
1094    //
1095
1096    // Emit the LHS condition.
1097    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
1098
1099    // Emit the RHS condition into TmpBB.
1100    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1101  } else {
1102    assert(Opc == Instruction::And && "Unknown merge op!");
1103    // Codegen X & Y as:
1104    //   jmp_if_X TmpBB
1105    //   jmp FBB
1106    // TmpBB:
1107    //   jmp_if_Y TBB
1108    //   jmp FBB
1109    //
1110    //  This requires creation of TmpBB after CurBB.
1111
1112    // Emit the LHS condition.
1113    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
1114
1115    // Emit the RHS condition into TmpBB.
1116    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1117  }
1118}
1119
1120/// If the set of cases should be emitted as a series of branches, return true.
1121/// If we should emit this as a bunch of and/or'd together conditions, return
1122/// false.
1123bool
1124SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
1125  if (Cases.size() != 2) return true;
1126
1127  // If this is two comparisons of the same values or'd or and'd together, they
1128  // will get folded into a single comparison, so don't emit two blocks.
1129  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1130       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1131      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1132       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1133    return false;
1134  }
1135
1136  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1137  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
1138  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1139      Cases[0].CC == Cases[1].CC &&
1140      isa<Constant>(Cases[0].CmpRHS) &&
1141      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1142    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1143      return false;
1144    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1145      return false;
1146  }
1147
1148  return true;
1149}
1150
1151void SelectionDAGBuilder::visitBr(BranchInst &I) {
1152  // Update machine-CFG edges.
1153  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1154
1155  // Figure out which block is immediately after the current one.
1156  MachineBasicBlock *NextBlock = 0;
1157  MachineFunction::iterator BBI = CurMBB;
1158  if (++BBI != FuncInfo.MF->end())
1159    NextBlock = BBI;
1160
1161  if (I.isUnconditional()) {
1162    // Update machine-CFG edges.
1163    CurMBB->addSuccessor(Succ0MBB);
1164
1165    // If this is not a fall-through branch, emit the branch.
1166    if (Succ0MBB != NextBlock)
1167      DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1168                              MVT::Other, getControlRoot(),
1169                              DAG.getBasicBlock(Succ0MBB)));
1170
1171    return;
1172  }
1173
1174  // If this condition is one of the special cases we handle, do special stuff
1175  // now.
1176  Value *CondVal = I.getCondition();
1177  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1178
1179  // If this is a series of conditions that are or'd or and'd together, emit
1180  // this as a sequence of branches instead of setcc's with and/or operations.
1181  // For example, instead of something like:
1182  //     cmp A, B
1183  //     C = seteq
1184  //     cmp D, E
1185  //     F = setle
1186  //     or C, F
1187  //     jnz foo
1188  // Emit:
1189  //     cmp A, B
1190  //     je foo
1191  //     cmp D, E
1192  //     jle foo
1193  //
1194  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1195    if (BOp->hasOneUse() &&
1196        (BOp->getOpcode() == Instruction::And ||
1197         BOp->getOpcode() == Instruction::Or)) {
1198      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1199      // If the compares in later blocks need to use values not currently
1200      // exported from this block, export them now.  This block should always
1201      // be the first entry.
1202      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1203
1204      // Allow some cases to be rejected.
1205      if (ShouldEmitAsBranches(SwitchCases)) {
1206        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1207          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1208          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1209        }
1210
1211        // Emit the branch for this block.
1212        visitSwitchCase(SwitchCases[0]);
1213        SwitchCases.erase(SwitchCases.begin());
1214        return;
1215      }
1216
1217      // Okay, we decided not to do this, remove any inserted MBB's and clear
1218      // SwitchCases.
1219      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1220        FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1221
1222      SwitchCases.clear();
1223    }
1224  }
1225
1226  // Create a CaseBlock record representing this branch.
1227  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1228               NULL, Succ0MBB, Succ1MBB, CurMBB);
1229
1230  // Use visitSwitchCase to actually insert the fast branch sequence for this
1231  // cond branch.
1232  visitSwitchCase(CB);
1233}
1234
1235/// visitSwitchCase - Emits the necessary code to represent a single node in
1236/// the binary search tree resulting from lowering a switch instruction.
1237void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) {
1238  SDValue Cond;
1239  SDValue CondLHS = getValue(CB.CmpLHS);
1240  DebugLoc dl = getCurDebugLoc();
1241
1242  // Build the setcc now.
1243  if (CB.CmpMHS == NULL) {
1244    // Fold "(X == true)" to X and "(X == false)" to !X to
1245    // handle common cases produced by branch lowering.
1246    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1247        CB.CC == ISD::SETEQ)
1248      Cond = CondLHS;
1249    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1250             CB.CC == ISD::SETEQ) {
1251      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1252      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1253    } else
1254      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1255  } else {
1256    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1257
1258    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1259    const APInt& High  = cast<ConstantInt>(CB.CmpRHS)->getValue();
1260
1261    SDValue CmpOp = getValue(CB.CmpMHS);
1262    EVT VT = CmpOp.getValueType();
1263
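    // A range check Low <= X <= High is emitted as a single unsigned
    // comparison, (X - Low) <=u (High - Low); when Low is the minimum value
    // for the type the subtraction is unnecessary and a plain signed
    // X <= High suffices.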
1264    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1265      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1266                          ISD::SETLE);
1267    } else {
1268      SDValue SUB = DAG.getNode(ISD::SUB, dl,
1269                                VT, CmpOp, DAG.getConstant(Low, VT));
1270      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1271                          DAG.getConstant(High-Low, VT), ISD::SETULE);
1272    }
1273  }
1274
1275  // Update successor info
1276  CurMBB->addSuccessor(CB.TrueBB);
1277  CurMBB->addSuccessor(CB.FalseBB);
1278
1279  // Set NextBlock to be the MBB immediately after the current one, if any.
1280  // This is used to avoid emitting unnecessary branches to the next block.
1281  MachineBasicBlock *NextBlock = 0;
1282  MachineFunction::iterator BBI = CurMBB;
1283  if (++BBI != FuncInfo.MF->end())
1284    NextBlock = BBI;
1285
1286  // If the lhs block is the next block, invert the condition so that we can
1287  // fall through to the lhs instead of the rhs block.
1288  if (CB.TrueBB == NextBlock) {
1289    std::swap(CB.TrueBB, CB.FalseBB);
1290    SDValue True = DAG.getConstant(1, Cond.getValueType());
1291    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1292  }
1293
1294  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1295                               MVT::Other, getControlRoot(), Cond,
1296                               DAG.getBasicBlock(CB.TrueBB));
1297
1298  // If the branch was constant folded, fix up the CFG.
1299  if (BrCond.getOpcode() == ISD::BR) {
1300    CurMBB->removeSuccessor(CB.FalseBB);
1301  } else {
1302    // Otherwise, go ahead and insert the false branch.
1303    if (BrCond == getControlRoot())
1304      CurMBB->removeSuccessor(CB.TrueBB);
1305
1306    if (CB.FalseBB != NextBlock)
1307      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1308                           DAG.getBasicBlock(CB.FalseBB));
1309  }
1310
1311  DAG.setRoot(BrCond);
1312}
1313
1314/// visitJumpTable - Emit JumpTable node in the current MBB
1315void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1316  // Emit the code for the jump table
1317  assert(JT.Reg != -1U && "Should lower JT Header first!");
1318  EVT PTy = TLI.getPointerTy();
1319  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
1320                                     JT.Reg, PTy);
1321  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1322  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
1323                                    MVT::Other, Index.getValue(1),
1324                                    Table, Index);
1325  DAG.setRoot(BrJumpTable);
1326}
1327
1328/// visitJumpTableHeader - This function emits the code needed to produce the
1329/// index into the jump table from the switch case value.
1330void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1331                                               JumpTableHeader &JTH) {
1332  // Subtract the lowest switch case value from the value being switched on,
1333  // and conditionally branch to the default MBB if the result is greater than
1334  // the difference between the smallest and largest cases.
1335  SDValue SwitchOp = getValue(JTH.SValue);
1336  EVT VT = SwitchOp.getValueType();
1337  SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1338                            DAG.getConstant(JTH.First, VT));
1339
1340  // The SDNode we just created, which holds the value being switched on minus
1341  // the smallest case value, needs to be copied to a virtual register so it
1342  // can be used as an index into the jump table in a subsequent basic block.
1343  // This value may be smaller or larger than the target's pointer type, and
1344  // therefore may require extension or truncation.
1345  SwitchOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), TLI.getPointerTy());
1346
1347  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1348  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1349                                    JumpTableReg, SwitchOp);
1350  JT.Reg = JumpTableReg;
1351
1352  // Emit the range check for the jump table, and branch to the default block
1353  // for the switch statement if the value being switched on exceeds the largest
1354  // case in the switch.
1355  SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
1356                             TLI.getSetCCResultType(Sub.getValueType()), Sub,
1357                             DAG.getConstant(JTH.Last-JTH.First,VT),
1358                             ISD::SETUGT);
1359
1360  // Set NextBlock to be the MBB immediately after the current one, if any.
1361  // This is used to avoid emitting unnecessary branches to the next block.
1362  MachineBasicBlock *NextBlock = 0;
1363  MachineFunction::iterator BBI = CurMBB;
1364
1365  if (++BBI != FuncInfo.MF->end())
1366    NextBlock = BBI;
1367
1368  SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1369                               MVT::Other, CopyTo, CMP,
1370                               DAG.getBasicBlock(JT.Default));
1371
1372  if (JT.MBB != NextBlock)
1373    BrCond = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
1374                         DAG.getBasicBlock(JT.MBB));
1375
1376  DAG.setRoot(BrCond);
1377}
1378
1379/// visitBitTestHeader - This function emits the necessary code to produce a
1380/// value suitable for "bit tests".
1381void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) {
1382  // Subtract the minimum value
1383  SDValue SwitchOp = getValue(B.SValue);
1384  EVT VT = SwitchOp.getValueType();
1385  SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1386                            DAG.getConstant(B.First, VT));
1387
1388  // Check range
1389  SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
1390                                  TLI.getSetCCResultType(Sub.getValueType()),
1391                                  Sub, DAG.getConstant(B.Range, VT),
1392                                  ISD::SETUGT);
1393
1394  SDValue ShiftOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(),
1395                                       TLI.getPointerTy());
1396
1397  B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
1398  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1399                                    B.Reg, ShiftOp);
1400
1401  // Set NextBlock to be the MBB immediately after the current one, if any.
1402  // This is used to avoid emitting unnecessary branches to the next block.
1403  MachineBasicBlock *NextBlock = 0;
1404  MachineFunction::iterator BBI = CurMBB;
1405  if (++BBI != FuncInfo.MF->end())
1406    NextBlock = BBI;
1407
1408  MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1409
1410  CurMBB->addSuccessor(B.Default);
1411  CurMBB->addSuccessor(MBB);
1412
1413  SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1414                                MVT::Other, CopyTo, RangeCmp,
1415                                DAG.getBasicBlock(B.Default));
1416
1417  if (MBB != NextBlock)
1418    BrRange = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrRange,
1419                          DAG.getBasicBlock(MBB));
1420
1421  DAG.setRoot(BrRange);
1422}
1423
1424/// visitBitTestCase - This function produces one "bit test".
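/// Illustrative example (not from the original source): with Reg holding the
/// already-rebased switch value and B.Mask == 0xA4, the nodes built below
/// amount to "if (((1 << Reg) & 0xA4) != 0) goto B.TargetBB;".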
1425void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
1426                                           unsigned Reg,
1427                                           BitTestCase &B) {
1428  // Make desired shift
1429  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
1430                                       TLI.getPointerTy());
1431  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
1432                                  TLI.getPointerTy(),
1433                                  DAG.getConstant(1, TLI.getPointerTy()),
1434                                  ShiftOp);
1435
1436  // Emit bit tests and jumps
1437  SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1438                              TLI.getPointerTy(), SwitchVal,
1439                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
1440  SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
1441                                TLI.getSetCCResultType(AndOp.getValueType()),
1442                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),
1443                                ISD::SETNE);
1444
1445  CurMBB->addSuccessor(B.TargetBB);
1446  CurMBB->addSuccessor(NextMBB);
1447
1448  SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1449                              MVT::Other, getControlRoot(),
1450                              AndCmp, DAG.getBasicBlock(B.TargetBB));
1451
1452  // Set NextBlock to be the MBB immediately after the current one, if any.
1453  // This is used to avoid emitting unnecessary branches to the next block.
1454  MachineBasicBlock *NextBlock = 0;
1455  MachineFunction::iterator BBI = CurMBB;
1456  if (++BBI != FuncInfo.MF->end())
1457    NextBlock = BBI;
1458
1459  if (NextMBB != NextBlock)
1460    BrAnd = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
1461                        DAG.getBasicBlock(NextMBB));
1462
1463  DAG.setRoot(BrAnd);
1464}
1465
1466void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
1467  // Retrieve successors.
1468  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1469  MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1470
1471  const Value *Callee(I.getCalledValue());
1472  if (isa<InlineAsm>(Callee))
1473    visitInlineAsm(&I);
1474  else
1475    LowerCallTo(&I, getValue(Callee), false, LandingPad);
1476
1477  // If the value of the invoke is used outside of its defining block, make it
1478  // available as a virtual register.
1479  CopyToExportRegsIfNeeded(&I);
1480
1481  // Update successor info
1482  CurMBB->addSuccessor(Return);
1483  CurMBB->addSuccessor(LandingPad);
1484
1485  // Drop into normal successor.
1486  DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1487                          MVT::Other, getControlRoot(),
1488                          DAG.getBasicBlock(Return)));
1489}
1490
1491void SelectionDAGBuilder::visitUnwind(UnwindInst &I) {
1492}
1493
1494/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
1495/// small case ranges).
1496bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
1497                                                 CaseRecVector& WorkList,
1498                                                 Value* SV,
1499                                                 MachineBasicBlock* Default) {
1500  Case& BackCase  = *(CR.Range.second-1);
1501
1502  // Size is the number of Cases represented by this range.
1503  size_t Size = CR.Range.second - CR.Range.first;
1504  if (Size > 3)
1505    return false;
1506
1507  // Get the MachineFunction which holds the current MBB.  This is used when
1508  // inserting any additional MBBs necessary to represent the switch.
1509  MachineFunction *CurMF = FuncInfo.MF;
1510
1511  // Figure out which block is immediately after the current one.
1512  MachineBasicBlock *NextBlock = 0;
1513  MachineFunction::iterator BBI = CR.CaseBB;
1514
1515  if (++BBI != FuncInfo.MF->end())
1516    NextBlock = BBI;
1517
1518  // TODO: If any two of the cases have the same destination, and if one value
1519  // is the same as the other, but has one bit unset that the other has set,
1520  // use bit manipulation to do two compares at once.  For example:
1521  // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1522
1523  // Rearrange the case blocks so that the last one falls through if possible.
1524  if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1525    // The last case block won't fall through into 'NextBlock' if we emit the
1526    // branches in this order.  See if rearranging a case value would help.
1527    for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1528      if (I->BB == NextBlock) {
1529        std::swap(*I, BackCase);
1530        break;
1531      }
1532    }
1533  }
1534
1535  // Create a CaseBlock record representing a conditional branch to
1536  // the Case's target mbb if the value SV being switched on is equal
1537  // to C.
1538  MachineBasicBlock *CurBlock = CR.CaseBB;
1539  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1540    MachineBasicBlock *FallThrough;
1541    if (I != E-1) {
1542      FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1543      CurMF->insert(BBI, FallThrough);
1544
1545      // Put SV in a virtual register to make it available from the new blocks.
1546      ExportFromCurrentBlock(SV);
1547    } else {
1548      // If the last case doesn't match, go to the default block.
1549      FallThrough = Default;
1550    }
1551
1552    Value *RHS, *LHS, *MHS;
1553    ISD::CondCode CC;
1554    if (I->High == I->Low) {
1555      // This is just a small case range containing exactly 1 case.
1556      CC = ISD::SETEQ;
1557      LHS = SV; RHS = I->High; MHS = NULL;
1558    } else {
1559      CC = ISD::SETLE;
1560      LHS = I->Low; MHS = SV; RHS = I->High;
1561    }
1562    CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1563
1564    // If emitting the first comparison, just call visitSwitchCase to emit the
1565    // code into the current block.  Otherwise, push the CaseBlock onto the
1566    // vector to be later processed by SDISel, and insert the node's MBB
1567    // before the next MBB.
1568    if (CurBlock == CurMBB)
1569      visitSwitchCase(CB);
1570    else
1571      SwitchCases.push_back(CB);
1572
1573    CurBlock = FallThrough;
1574  }
1575
1576  return true;
1577}
1578
1579static inline bool areJTsAllowed(const TargetLowering &TLI) {
1580  return !DisableJumpTables &&
1581          (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1582           TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
1583}
1584
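// ComputeRange returns the number of integer values in the inclusive range
// [First, Last].  The operands are sign-extended by one bit so the +1 below
// cannot overflow.  For example (illustrative), First == -2 and Last == 5
// yields a range of 8.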
1585static APInt ComputeRange(const APInt &First, const APInt &Last) {
1586  APInt LastExt(Last), FirstExt(First);
1587  uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1588  LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1589  return (LastExt - FirstExt + 1ULL);
1590}
1591
1592/// handleJTSwitchCase - Emit a jump table for the current switch case range.
1593bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
1594                                             CaseRecVector& WorkList,
1595                                             Value* SV,
1596                                             MachineBasicBlock* Default) {
1597  Case& FrontCase = *CR.Range.first;
1598  Case& BackCase  = *(CR.Range.second-1);
1599
1600  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1601  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1602
1603  APInt TSize(First.getBitWidth(), 0);
1604  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1605       I!=E; ++I)
1606    TSize += I->size();
1607
1608  if (!areJTsAllowed(TLI) || TSize.ult(APInt(First.getBitWidth(), 4)))
1609    return false;
1610
1611  APInt Range = ComputeRange(First, Last);
1612  double Density = TSize.roundToDouble() / Range.roundToDouble();
1613  if (Density < 0.4)
1614    return false;
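  // Illustrative example (not from the original source): cases {1, 2, 3, 10}
  // give TSize == 4 and Range == 10, so Density == 0.4 and the check above
  // passes; the resulting jump table has 10 slots, 6 of which branch to the
  // default block.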
1615
1616  DEBUG(dbgs() << "Lowering jump table\n"
1617               << "First entry: " << First << ". Last entry: " << Last << '\n'
1618               << "Range: " << Range << '\n'
1619               << "Size: " << TSize << ". Density: " << Density << "\n\n");
1620
1621  // Get the MachineFunction which holds the current MBB.  This is used when
1622  // inserting any additional MBBs necessary to represent the switch.
1623  MachineFunction *CurMF = FuncInfo.MF;
1624
1625  // Figure out which block is immediately after the current one.
1626  MachineFunction::iterator BBI = CR.CaseBB;
1627  ++BBI;
1628
1629  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1630
1631  // Create a new basic block to hold the code for loading the address
1632  // of the jump table, and jumping to it.  Update successor information;
1633  // we will either branch to the default case for the switch, or the jump
1634  // table.
1635  MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1636  CurMF->insert(BBI, JumpTableBB);
1637  CR.CaseBB->addSuccessor(Default);
1638  CR.CaseBB->addSuccessor(JumpTableBB);
1639
1640  // Build a vector of destination BBs, corresponding to each target
1641  // of the jump table. If the value of the jump table slot corresponds to
1642  // a case statement, push the case's BB onto the vector, otherwise, push
1643  // the default BB.
1644  std::vector<MachineBasicBlock*> DestBBs;
1645  APInt TEI = First;
1646  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1647    const APInt &Low = cast<ConstantInt>(I->Low)->getValue();
1648    const APInt &High = cast<ConstantInt>(I->High)->getValue();
1649
1650    if (Low.sle(TEI) && TEI.sle(High)) {
1651      DestBBs.push_back(I->BB);
1652      if (TEI==High)
1653        ++I;
1654    } else {
1655      DestBBs.push_back(Default);
1656    }
1657  }
1658
1659  // Update successor info. Add one edge to each unique successor.
1660  BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1661  for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1662         E = DestBBs.end(); I != E; ++I) {
1663    if (!SuccsHandled[(*I)->getNumber()]) {
1664      SuccsHandled[(*I)->getNumber()] = true;
1665      JumpTableBB->addSuccessor(*I);
1666    }
1667  }
1668
1669  // Create a jump table index for this jump table, or return an existing
1670  // one.
1671  unsigned JTEncoding = TLI.getJumpTableEncoding();
1672  unsigned JTI = CurMF->getOrCreateJumpTableInfo(JTEncoding)
1673                       ->getJumpTableIndex(DestBBs);
1674
1675  // Set the jump table information so that we can codegen it as a second
1676  // MachineBasicBlock
1677  JumpTable JT(-1U, JTI, JumpTableBB, Default);
1678  JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1679  if (CR.CaseBB == CurMBB)
1680    visitJumpTableHeader(JT, JTH);
1681
1682  JTCases.push_back(JumpTableBlock(JTH, JT));
1683
1684  return true;
1685}
1686
1687/// handleBTSplitSwitchCase - Emit a comparison and split the binary search
1688/// tree into 2 subtrees.
1689bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
1690                                                  CaseRecVector& WorkList,
1691                                                  Value* SV,
1692                                                  MachineBasicBlock* Default) {
1693  // Get the MachineFunction which holds the current MBB.  This is used when
1694  // inserting any additional MBBs necessary to represent the switch.
1695  MachineFunction *CurMF = FuncInfo.MF;
1696
1697  // Figure out which block is immediately after the current one.
1698  MachineFunction::iterator BBI = CR.CaseBB;
1699  ++BBI;
1700
1701  Case& FrontCase = *CR.Range.first;
1702  Case& BackCase  = *(CR.Range.second-1);
1703  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1704
1705  // Size is the number of Cases represented by this range.
1706  unsigned Size = CR.Range.second - CR.Range.first;
1707
1708  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1709  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1710  double FMetric = 0;
1711  CaseItr Pivot = CR.Range.first + Size/2;
1712
1713  // Select the optimal pivot, maximizing the summed density of the LHS and RHS.
1714  // This will (heuristically) allow us to emit JumpTables later.
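  // Illustrative example (not from the original source): for the case values
  // {0, 1, 2, 3, 100, 101, 102, 103}, splitting between 3 and 100 leaves two
  // fully dense halves (LDensity == RDensity == 1.0), each of which can later
  // be lowered to its own jump table.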
1715  APInt TSize(First.getBitWidth(), 0);
1716  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1717       I!=E; ++I)
1718    TSize += I->size();
1719
1720  APInt LSize = FrontCase.size();
1721  APInt RSize = TSize-LSize;
1722  DEBUG(dbgs() << "Selecting best pivot: \n"
1723               << "First: " << First << ", Last: " << Last <<'\n'
1724               << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1725  for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1726       J!=E; ++I, ++J) {
1727    const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
1728    const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
1729    APInt Range = ComputeRange(LEnd, RBegin);
1730    assert((Range - 2ULL).isNonNegative() &&
1731           "Invalid case distance");
1732    double LDensity = (double)LSize.roundToDouble() /
1733                           (LEnd - First + 1ULL).roundToDouble();
1734    double RDensity = (double)RSize.roundToDouble() /
1735                           (Last - RBegin + 1ULL).roundToDouble();
1736    double Metric = Range.logBase2()*(LDensity+RDensity);
1737    // Should always split in some non-trivial place
1738    DEBUG(dbgs() <<"=>Step\n"
1739                 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1740                 << "LDensity: " << LDensity
1741                 << ", RDensity: " << RDensity << '\n'
1742                 << "Metric: " << Metric << '\n');
1743    if (FMetric < Metric) {
1744      Pivot = J;
1745      FMetric = Metric;
1746      DEBUG(dbgs() << "Current metric set to: " << FMetric << '\n');
1747    }
1748
1749    LSize += J->size();
1750    RSize -= J->size();
1751  }
1752  if (areJTsAllowed(TLI)) {
1753    // If our case is dense we *really* should handle it earlier!
1754    assert((FMetric > 0) && "Should handle dense range earlier!");
1755  } else {
1756    Pivot = CR.Range.first + Size/2;
1757  }
1758
1759  CaseRange LHSR(CR.Range.first, Pivot);
1760  CaseRange RHSR(Pivot, CR.Range.second);
1761  Constant *C = Pivot->Low;
1762  MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1763
1764  // We know that we branch to the LHS if the Value being switched on is
1765  // less than the Pivot value, C.  We use this to optimize our binary
1766  // tree a bit, by recognizing that if SV is greater than or equal to the
1767  // LHS's Case Value, and that Case Value is exactly one less than the
1768  // Pivot's Value, then we can branch directly to the LHS's Target,
1769  // rather than creating a leaf node for it.
1770  if ((LHSR.second - LHSR.first) == 1 &&
1771      LHSR.first->High == CR.GE &&
1772      cast<ConstantInt>(C)->getValue() ==
1773      (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1774    TrueBB = LHSR.first->BB;
1775  } else {
1776    TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1777    CurMF->insert(BBI, TrueBB);
1778    WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1779
1780    // Put SV in a virtual register to make it available from the new blocks.
1781    ExportFromCurrentBlock(SV);
1782  }
1783
1784  // Similar to the optimization above, if the Value being switched on is
1785  // known to be less than the Constant CR.LT, and the current Case Value
1786  // is CR.LT - 1, then we can branch directly to the target block for
1787  // the current Case Value, rather than emitting a RHS leaf node for it.
1788  if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1789      cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1790      (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1791    FalseBB = RHSR.first->BB;
1792  } else {
1793    FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1794    CurMF->insert(BBI, FalseBB);
1795    WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1796
1797    // Put SV in a virtual register to make it available from the new blocks.
1798    ExportFromCurrentBlock(SV);
1799  }
1800
1801  // Create a CaseBlock record representing a conditional branch to
1802  // the LHS node if the value SV being switched on is less than C.
1803  // Otherwise, branch to the RHS node.
1804  CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1805
1806  if (CR.CaseBB == CurMBB)
1807    visitSwitchCase(CB);
1808  else
1809    SwitchCases.push_back(CB);
1810
1811  return true;
1812}
1813
1814/// handleBitTestsSwitchCase - If the current case range has few destinations
1815/// and spans less than the machine word bitwidth, encode the case range into a
1816/// series of masks and emit bit tests with these masks.
1817bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
1818                                                   CaseRecVector& WorkList,
1819                                                   Value* SV,
1820                                                   MachineBasicBlock* Default){
1821  EVT PTy = TLI.getPointerTy();
1822  unsigned IntPtrBits = PTy.getSizeInBits();
1823
1824  Case& FrontCase = *CR.Range.first;
1825  Case& BackCase  = *(CR.Range.second-1);
1826
1827  // Get the MachineFunction which holds the current MBB.  This is used when
1828  // inserting any additional MBBs necessary to represent the switch.
1829  MachineFunction *CurMF = FuncInfo.MF;
1830
1831  // If target does not have legal shift left, do not emit bit tests at all.
1832  if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1833    return false;
1834
1835  size_t numCmps = 0;
1836  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1837       I!=E; ++I) {
1838    // A single case counts as one comparison, a case range as two.
1839    numCmps += (I->Low == I->High ? 1 : 2);
1840  }
1841
1842  // Count unique destinations
1843  SmallSet<MachineBasicBlock*, 4> Dests;
1844  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1845    Dests.insert(I->BB);
1846    if (Dests.size() > 3)
1847      // Don't bother with the code below if there are too many unique destinations.
1848      return false;
1849  }
1850  DEBUG(dbgs() << "Total number of unique destinations: "
1851        << Dests.size() << '\n'
1852        << "Total number of comparisons: " << numCmps << '\n');
1853
1854  // Compute span of values.
1855  const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1856  const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1857  APInt cmpRange = maxValue - minValue;
1858
1859  DEBUG(dbgs() << "Compare range: " << cmpRange << '\n'
1860               << "Low bound: " << minValue << '\n'
1861               << "High bound: " << maxValue << '\n');
1862
1863  if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1864      (!(Dests.size() == 1 && numCmps >= 3) &&
1865       !(Dests.size() == 2 && numCmps >= 5) &&
1866       !(Dests.size() >= 3 && numCmps >= 6)))
1867    return false;
1868
1869  DEBUG(dbgs() << "Emitting bit tests\n");
1870  APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1871
1872  // Optimize the case where all the case values fit in a word without having
1873  // to subtract minValue; in that case we can avoid the subtraction
1874  // altogether.
1875  if (minValue.isNonNegative() &&
1876      maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1877    cmpRange = maxValue;
1878  } else {
1879    lowBound = minValue;
1880  }
1881
1882  CaseBitsVector CasesBits;
1883  unsigned i, count = 0;
1884
1885  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1886    MachineBasicBlock* Dest = I->BB;
1887    for (i = 0; i < count; ++i)
1888      if (Dest == CasesBits[i].BB)
1889        break;
1890
1891    if (i == count) {
1892      assert((count < 3) && "Too many destinations to test!");
1893      CasesBits.push_back(CaseBits(0, Dest, 0));
1894      count++;
1895    }
1896
1897    const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1898    const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1899
1900    uint64_t lo = (lowValue - lowBound).getZExtValue();
1901    uint64_t hi = (highValue - lowBound).getZExtValue();
1902
1903    for (uint64_t j = lo; j <= hi; j++) {
1904      CasesBits[i].Mask |=  1ULL << j;
1905      CasesBits[i].Bits++;
1906    }
1907
1908  }
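  // Illustrative example (not from the original source): for single cases 2, 5
  // and 7 all branching to the same block (so minValue == 2, lowBound == 0 and
  // cmpRange == 7), the loop above produces one entry with
  //   Mask == (1<<2)|(1<<5)|(1<<7) == 0xA4 and Bits == 3,
  // which is later tested with a single AND in visitBitTestCase.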
1909  std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
1910
1911  BitTestInfo BTC;
1912
1913  // Figure out which block is immediately after the current one.
1914  MachineFunction::iterator BBI = CR.CaseBB;
1915  ++BBI;
1916
1917  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1918
1919  DEBUG(dbgs() << "Cases:\n");
1920  for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
1921    DEBUG(dbgs() << "Mask: " << CasesBits[i].Mask
1922                 << ", Bits: " << CasesBits[i].Bits
1923                 << ", BB: " << CasesBits[i].BB << '\n');
1924
1925    MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1926    CurMF->insert(BBI, CaseBB);
1927    BTC.push_back(BitTestCase(CasesBits[i].Mask,
1928                              CaseBB,
1929                              CasesBits[i].BB));
1930
1931    // Put SV in a virtual register to make it available from the new blocks.
1932    ExportFromCurrentBlock(SV);
1933  }
1934
1935  BitTestBlock BTB(lowBound, cmpRange, SV,
1936                   -1U, (CR.CaseBB == CurMBB),
1937                   CR.CaseBB, Default, BTC);
1938
1939  if (CR.CaseBB == CurMBB)
1940    visitBitTestHeader(BTB);
1941
1942  BitTestCases.push_back(BTB);
1943
1944  return true;
1945}
1946
1947/// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
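/// For example (illustrative, not from the original source), the cases
/// 1->A, 2->A, 3->A, 7->B are clusterified into [1,3]->A and [7,7]->B, and the
/// returned numCmps is 3 (two compares for the range plus one for the single
/// value).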
1948size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
1949                                       const SwitchInst& SI) {
1950  size_t numCmps = 0;
1951
1952  // Start with "simple" cases
1953  for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
1954    MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
1955    Cases.push_back(Case(SI.getSuccessorValue(i),
1956                         SI.getSuccessorValue(i),
1957                         SMBB));
1958  }
1959  std::sort(Cases.begin(), Cases.end(), CaseCmp());
1960
1961  // Merge cases into clusters.
1962  if (Cases.size() >= 2)
1963    // Must recompute end() each iteration because it may be
1964    // invalidated by erase if we hold on to it
1965    for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
1966      const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
1967      const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
1968      MachineBasicBlock* nextBB = J->BB;
1969      MachineBasicBlock* currentBB = I->BB;
1970
1971      // If the two neighboring cases go to the same destination, merge them
1972      // into a single case.
1973      if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
1974        I->High = J->High;
1975        J = Cases.erase(J);
1976      } else {
1977        I = J++;
1978      }
1979    }
1980
1981  for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
1982    if (I->Low != I->High)
1983      // A range counts double, since it requires two compares.
1984      ++numCmps;
1985  }
1986
1987  return numCmps;
1988}
1989
1990void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
1991  // Figure out which block is immediately after the current one.
1992  MachineBasicBlock *NextBlock = 0;
1993  MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
1994
1995  // If there is only the default destination, branch to it if it is not the
1996  // next basic block.  Otherwise, just fall through.
1997  if (SI.getNumOperands() == 2) {
1998    // Update machine-CFG edges.
1999
2000    // If this is not a fall-through branch, emit the branch.
2001    CurMBB->addSuccessor(Default);
2002    if (Default != NextBlock)
2003      DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
2004                              MVT::Other, getControlRoot(),
2005                              DAG.getBasicBlock(Default)));
2006
2007    return;
2008  }
2009
2010  // If there are any non-default case statements, create a vector of Cases
2011  // representing each one, and sort the vector so that we can efficiently
2012  // create a binary search tree from them.
2013  CaseVector Cases;
2014  size_t numCmps = Clusterify(Cases, SI);
2015  DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
2016               << ". Total compares: " << numCmps << '\n');
2017  numCmps = 0;
2018
2019  // Get the Value to be switched on and default basic blocks, which will be
2020  // inserted into CaseBlock records, representing basic blocks in the binary
2021  // search tree.
2022  Value *SV = SI.getOperand(0);
2023
2024  // Push the initial CaseRec onto the worklist
2025  CaseRecVector WorkList;
2026  WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2027
2028  while (!WorkList.empty()) {
2029    // Grab a record representing a case range to process off the worklist
2030    CaseRec CR = WorkList.back();
2031    WorkList.pop_back();
2032
2033    if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2034      continue;
2035
2036    // If the range has few cases (three or fewer), emit a series of specific
2037    // tests.
2038    if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2039      continue;
2040
2041    // If the switch has at least 4 case values, is at least 40% dense, and the
2042    // target supports indirect branches, then emit a jump table rather than
2043    // lowering the switch to a binary tree of conditional branches.
2044    if (handleJTSwitchCase(CR, WorkList, SV, Default))
2045      continue;
2046
2047    // Emit a binary tree. We need to pick a pivot, and push the left and right
2048    // ranges onto the worklist. Leaves are handled via handleSmallSwitchRange().
2049    handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2050  }
2051}
2052
2053void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) {
2054  // Update machine-CFG edges.
2055  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i)
2056    CurMBB->addSuccessor(FuncInfo.MBBMap[I.getSuccessor(i)]);
2057
2058  DAG.setRoot(DAG.getNode(ISD::BRIND, getCurDebugLoc(),
2059                          MVT::Other, getControlRoot(),
2060                          getValue(I.getAddress())));
2061}
2062
2063void SelectionDAGBuilder::visitFSub(User &I) {
2064  // -0.0 - X --> fneg
2065  const Type *Ty = I.getType();
2066  if (isa<VectorType>(Ty)) {
2067    if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2068      const VectorType *DestTy = cast<VectorType>(I.getType());
2069      const Type *ElTy = DestTy->getElementType();
2070      unsigned VL = DestTy->getNumElements();
2071      std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2072      Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2073      if (CV == CNZ) {
2074        SDValue Op2 = getValue(I.getOperand(1));
2075        setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2076                                 Op2.getValueType(), Op2));
2077        return;
2078      }
2079    }
2080  }
2081
2082  if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2083    if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2084      SDValue Op2 = getValue(I.getOperand(1));
2085      setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2086                               Op2.getValueType(), Op2));
2087      return;
2088    }
2089
2090  visitBinary(I, ISD::FSUB);
2091}
2092
2093void SelectionDAGBuilder::visitBinary(User &I, unsigned OpCode) {
2094  SDValue Op1 = getValue(I.getOperand(0));
2095  SDValue Op2 = getValue(I.getOperand(1));
2096  setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2097                           Op1.getValueType(), Op1, Op2));
2098}
2099
2100void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) {
2101  SDValue Op1 = getValue(I.getOperand(0));
2102  SDValue Op2 = getValue(I.getOperand(1));
2103  if (!isa<VectorType>(I.getType()) &&
2104      Op2.getValueType() != TLI.getShiftAmountTy()) {
2105    // If the operand is smaller than the shift count type, promote it.
2106    EVT PTy = TLI.getPointerTy();
2107    EVT STy = TLI.getShiftAmountTy();
2108    if (STy.bitsGT(Op2.getValueType()))
2109      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2110                        TLI.getShiftAmountTy(), Op2);
2111    // If the operand is larger than the shift count type but the shift
2112    // count type has enough bits to represent any shift value, truncate
2113    // it now. This is a common case and it exposes the truncate to
2114    // optimization early.
2115    else if (STy.getSizeInBits() >=
2116             Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2117      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2118                        TLI.getShiftAmountTy(), Op2);
2119    // Otherwise we'll need to temporarily settle for some other
2120    // convenient type; type legalization will make adjustments as
2121    // needed.
2122    else if (PTy.bitsLT(Op2.getValueType()))
2123      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2124                        TLI.getPointerTy(), Op2);
2125    else if (PTy.bitsGT(Op2.getValueType()))
2126      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2127                        TLI.getPointerTy(), Op2);
2128  }
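  // Illustrative note (not from the original source): an i64 shift amount on a
  // target whose shift-amount type is i8 is truncated above, since i8 can
  // still represent every legal shift of an i64 value (Log2_32_Ceil(64) == 6).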
2129
2130  setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2131                           Op1.getValueType(), Op1, Op2));
2132}
2133
2134void SelectionDAGBuilder::visitICmp(User &I) {
2135  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2136  if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2137    predicate = IC->getPredicate();
2138  else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2139    predicate = ICmpInst::Predicate(IC->getPredicate());
2140  SDValue Op1 = getValue(I.getOperand(0));
2141  SDValue Op2 = getValue(I.getOperand(1));
2142  ISD::CondCode Opcode = getICmpCondCode(predicate);
2143
2144  EVT DestVT = TLI.getValueType(I.getType());
2145  setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
2146}
2147
2148void SelectionDAGBuilder::visitFCmp(User &I) {
2149  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2150  if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2151    predicate = FC->getPredicate();
2152  else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2153    predicate = FCmpInst::Predicate(FC->getPredicate());
2154  SDValue Op1 = getValue(I.getOperand(0));
2155  SDValue Op2 = getValue(I.getOperand(1));
2156  ISD::CondCode Condition = getFCmpCondCode(predicate);
2157  EVT DestVT = TLI.getValueType(I.getType());
2158  setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
2159}
2160
2161void SelectionDAGBuilder::visitSelect(User &I) {
2162  SmallVector<EVT, 4> ValueVTs;
2163  ComputeValueVTs(TLI, I.getType(), ValueVTs);
2164  unsigned NumValues = ValueVTs.size();
2165  if (NumValues == 0) return;
2166
2167  SmallVector<SDValue, 4> Values(NumValues);
2168  SDValue Cond     = getValue(I.getOperand(0));
2169  SDValue TrueVal  = getValue(I.getOperand(1));
2170  SDValue FalseVal = getValue(I.getOperand(2));
2171
2172  for (unsigned i = 0; i != NumValues; ++i)
2173    Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2174                            TrueVal.getNode()->getValueType(i), Cond,
2175                            SDValue(TrueVal.getNode(),
2176                                    TrueVal.getResNo() + i),
2177                            SDValue(FalseVal.getNode(),
2178                                    FalseVal.getResNo() + i));
2179
2180  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2181                           DAG.getVTList(&ValueVTs[0], NumValues),
2182                           &Values[0], NumValues));
2183}
2184
2185void SelectionDAGBuilder::visitTrunc(User &I) {
2186  // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2187  SDValue N = getValue(I.getOperand(0));
2188  EVT DestVT = TLI.getValueType(I.getType());
2189  setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2190}
2191
2192void SelectionDAGBuilder::visitZExt(User &I) {
2193  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2194  // ZExt also can't be a cast to bool for the same reason. So, nothing much to do.
2195  SDValue N = getValue(I.getOperand(0));
2196  EVT DestVT = TLI.getValueType(I.getType());
2197  setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2198}
2199
2200void SelectionDAGBuilder::visitSExt(User &I) {
2201  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2202  // SExt also can't be a cast to bool for same reason. So, nothing much to do
2203  // SExt also can't be a cast to bool for the same reason. So, nothing much to do.
2204  EVT DestVT = TLI.getValueType(I.getType());
2205  setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2206}
2207
2208void SelectionDAGBuilder::visitFPTrunc(User &I) {
2209  // FPTrunc is never a no-op cast, no need to check
2210  SDValue N = getValue(I.getOperand(0));
2211  EVT DestVT = TLI.getValueType(I.getType());
2212  setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2213                           DestVT, N, DAG.getIntPtrConstant(0)));
2214}
2215
2216void SelectionDAGBuilder::visitFPExt(User &I){
2217  // FPExt is never a no-op cast, no need to check.
2218  SDValue N = getValue(I.getOperand(0));
2219  EVT DestVT = TLI.getValueType(I.getType());
2220  setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2221}
2222
2223void SelectionDAGBuilder::visitFPToUI(User &I) {
2224  // FPToUI is never a no-op cast, no need to check
2225  SDValue N = getValue(I.getOperand(0));
2226  EVT DestVT = TLI.getValueType(I.getType());
2227  setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2228}
2229
2230void SelectionDAGBuilder::visitFPToSI(User &I) {
2231  // FPToSI is never a no-op cast, no need to check
2232  SDValue N = getValue(I.getOperand(0));
2233  EVT DestVT = TLI.getValueType(I.getType());
2234  setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2235}
2236
2237void SelectionDAGBuilder::visitUIToFP(User &I) {
2238  // UIToFP is never a no-op cast, no need to check
2239  SDValue N = getValue(I.getOperand(0));
2240  EVT DestVT = TLI.getValueType(I.getType());
2241  setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2242}
2243
2244void SelectionDAGBuilder::visitSIToFP(User &I){
2245  // SIToFP is never a no-op cast, no need to check
2246  SDValue N = getValue(I.getOperand(0));
2247  EVT DestVT = TLI.getValueType(I.getType());
2248  setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2249}
2250
2251void SelectionDAGBuilder::visitPtrToInt(User &I) {
2252  // What to do depends on the size of the integer and the size of the pointer.
2253  // We can either truncate, zero extend, or no-op, accordingly.
2254  SDValue N = getValue(I.getOperand(0));
2255  EVT SrcVT = N.getValueType();
2256  EVT DestVT = TLI.getValueType(I.getType());
2257  setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
2258}
2259
2260void SelectionDAGBuilder::visitIntToPtr(User &I) {
2261  // What to do depends on the size of the integer and the size of the pointer.
2262  // We can either truncate, zero extend, or no-op, accordingly.
2263  SDValue N = getValue(I.getOperand(0));
2264  EVT SrcVT = N.getValueType();
2265  EVT DestVT = TLI.getValueType(I.getType());
2266  setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
2267}
2268
2269void SelectionDAGBuilder::visitBitCast(User &I) {
2270  SDValue N = getValue(I.getOperand(0));
2271  EVT DestVT = TLI.getValueType(I.getType());
2272
2273  // BitCast assures us that the source and destination are the same size, so
2274  // this is either a BIT_CONVERT or a no-op.
2275  if (DestVT != N.getValueType())
2276    setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2277                             DestVT, N)); // convert types.
2278  else
2279    setValue(&I, N);            // noop cast.
2280}
2281
2282void SelectionDAGBuilder::visitInsertElement(User &I) {
2283  SDValue InVec = getValue(I.getOperand(0));
2284  SDValue InVal = getValue(I.getOperand(1));
2285  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2286                              TLI.getPointerTy(),
2287                              getValue(I.getOperand(2)));
2288  setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2289                           TLI.getValueType(I.getType()),
2290                           InVec, InVal, InIdx));
2291}
2292
2293void SelectionDAGBuilder::visitExtractElement(User &I) {
2294  SDValue InVec = getValue(I.getOperand(0));
2295  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2296                              TLI.getPointerTy(),
2297                              getValue(I.getOperand(1)));
2298  setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2299                           TLI.getValueType(I.getType()), InVec, InIdx));
2300}
2301
2302// Utility for visitShuffleVector - Returns true if the mask is a sequential mask
2303// starting from SIndx and increasing to the element length (undefs are allowed).
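// For example (illustrative, not from the original source), the masks
// <0,1,2,3> and <4,-1,6,7> are sequential for SIndx == 0 and SIndx == 4
// respectively, while <0,2,1,3> is not.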
2304static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2305  unsigned MaskNumElts = Mask.size();
2306  for (unsigned i = 0; i != MaskNumElts; ++i)
2307    if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2308      return false;
2309  return true;
2310}
2311
2312void SelectionDAGBuilder::visitShuffleVector(User &I) {
2313  SmallVector<int, 8> Mask;
2314  SDValue Src1 = getValue(I.getOperand(0));
2315  SDValue Src2 = getValue(I.getOperand(1));
2316
2317  // Convert the ConstantVector mask operand into an array of ints, with -1
2318  // representing undef values.
2319  SmallVector<Constant*, 8> MaskElts;
2320  cast<Constant>(I.getOperand(2))->getVectorElements(MaskElts);
2321  unsigned MaskNumElts = MaskElts.size();
2322  for (unsigned i = 0; i != MaskNumElts; ++i) {
2323    if (isa<UndefValue>(MaskElts[i]))
2324      Mask.push_back(-1);
2325    else
2326      Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2327  }
2328
2329  EVT VT = TLI.getValueType(I.getType());
2330  EVT SrcVT = Src1.getValueType();
2331  unsigned SrcNumElts = SrcVT.getVectorNumElements();
2332
2333  if (SrcNumElts == MaskNumElts) {
2334    setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2335                                      &Mask[0]));
2336    return;
2337  }
2338
2339  // Normalize the shuffle vector since mask and vector length don't match.
2340  if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2341    // The mask is longer than the source vectors and is a multiple of the source
2342    // vector length.  We can use CONCAT_VECTORS to make the mask and vector
2343    // lengths match.
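    // Illustrative example (not from the original source): shuffling two
    // <2 x i32> sources with the <4 x i32> mask <0,1,2,3> is exactly
    // "concatenate Src1 and Src2", so a single CONCAT_VECTORS node is emitted
    // below.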
2344    if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2345      // The shuffle is concatenating two vectors together.
2346      setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2347                               VT, Src1, Src2));
2348      return;
2349    }
2350
2351    // Pad both vectors with undefs to make them the same length as the mask.
2352    unsigned NumConcat = MaskNumElts / SrcNumElts;
2353    bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2354    bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2355    SDValue UndefVal = DAG.getUNDEF(SrcVT);
2356
2357    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2358    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2359    MOps1[0] = Src1;
2360    MOps2[0] = Src2;
2361
2362    Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2363                                                  getCurDebugLoc(), VT,
2364                                                  &MOps1[0], NumConcat);
2365    Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2366                                                  getCurDebugLoc(), VT,
2367                                                  &MOps2[0], NumConcat);
2368
2369    // Readjust mask for new input vector length.
2370    SmallVector<int, 8> MappedOps;
2371    for (unsigned i = 0; i != MaskNumElts; ++i) {
2372      int Idx = Mask[i];
2373      if (Idx < (int)SrcNumElts)
2374        MappedOps.push_back(Idx);
2375      else
2376        MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2377    }
2378
2379    setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2380                                      &MappedOps[0]));
2381    return;
2382  }
2383
2384  if (SrcNumElts > MaskNumElts) {
2385    // Analyze the access pattern of the vector to see if we can extract
2386    // two subvectors and do the shuffle. The analysis is done by calculating
2387    // the range of elements the mask accesses on each vector.
2388    int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2389    int MaxRange[2] = {-1, -1};
2390
2391    for (unsigned i = 0; i != MaskNumElts; ++i) {
2392      int Idx = Mask[i];
2393      int Input = 0;
2394      if (Idx < 0)
2395        continue;
2396
2397      if (Idx >= (int)SrcNumElts) {
2398        Input = 1;
2399        Idx -= SrcNumElts;
2400      }
2401      if (Idx > MaxRange[Input])
2402        MaxRange[Input] = Idx;
2403      if (Idx < MinRange[Input])
2404        MinRange[Input] = Idx;
2405    }
2406
2407    // Check if the access is smaller than the vector size and whether we can
2408    // find a reasonable extract index.
2409    int RangeUse[2] = { 2, 2 };  // 0 = Unused, 1 = Extract, 2 = Can not
2410                                 // Extract.
2411    int StartIdx[2];  // StartIdx to extract from
2412    for (int Input=0; Input < 2; ++Input) {
2413      if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2414        RangeUse[Input] = 0; // Unused
2415        StartIdx[Input] = 0;
2416      } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2417        // Fits within range but we should see if we can find a good
2418        // start index that is a multiple of the mask length.
2419        if (MaxRange[Input] < (int)MaskNumElts) {
2420          RangeUse[Input] = 1; // Extract from beginning of the vector
2421          StartIdx[Input] = 0;
2422        } else {
2423          StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2424          if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2425              StartIdx[Input] + MaskNumElts < SrcNumElts)
2426            RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2427        }
2428      }
2429    }
2430
2431    if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2432      setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
2433      return;
2434    }
2435    else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2436      // Extract appropriate subvector and generate a vector shuffle
2437      for (int Input=0; Input < 2; ++Input) {
2438        SDValue &Src = Input == 0 ? Src1 : Src2;
2439        if (RangeUse[Input] == 0)
2440          Src = DAG.getUNDEF(VT);
2441        else
2442          Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2443                            Src, DAG.getIntPtrConstant(StartIdx[Input]));
2444      }
2445
2446      // Calculate new mask.
2447      SmallVector<int, 8> MappedOps;
2448      for (unsigned i = 0; i != MaskNumElts; ++i) {
2449        int Idx = Mask[i];
2450        if (Idx < 0)
2451          MappedOps.push_back(Idx);
2452        else if (Idx < (int)SrcNumElts)
2453          MappedOps.push_back(Idx - StartIdx[0]);
2454        else
2455          MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2456      }
2457
2458      setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2459                                        &MappedOps[0]));
2460      return;
2461    }
2462  }
2463
2464  // We can't use either concat vectors or extract subvectors, so fall back to
2465  // replacing the shuffle with a series of EXTRACT_VECTOR_ELT nodes feeding a
2466  // BUILD_VECTOR.
2467  EVT EltVT = VT.getVectorElementType();
2468  EVT PtrVT = TLI.getPointerTy();
2469  SmallVector<SDValue,8> Ops;
2470  for (unsigned i = 0; i != MaskNumElts; ++i) {
2471    if (Mask[i] < 0) {
2472      Ops.push_back(DAG.getUNDEF(EltVT));
2473    } else {
2474      int Idx = Mask[i];
2475      SDValue Res;
2476
2477      if (Idx < (int)SrcNumElts)
2478        Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2479                          EltVT, Src1, DAG.getConstant(Idx, PtrVT));
2480      else
2481        Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2482                          EltVT, Src2,
2483                          DAG.getConstant(Idx - SrcNumElts, PtrVT));
2484
2485      Ops.push_back(Res);
2486    }
2487  }
2488
2489  setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2490                           VT, &Ops[0], Ops.size()));
2491}
2492
2493void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) {
2494  const Value *Op0 = I.getOperand(0);
2495  const Value *Op1 = I.getOperand(1);
2496  const Type *AggTy = I.getType();
2497  const Type *ValTy = Op1->getType();
2498  bool IntoUndef = isa<UndefValue>(Op0);
2499  bool FromUndef = isa<UndefValue>(Op1);
2500
2501  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2502                                            I.idx_begin(), I.idx_end());
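  // Illustrative example (not from the original source): inserting an i32 into
  // index {1, 0} of the aggregate {i32, {i32, i32}} gives LinearIndex == 1, so
  // Values[0] comes from Agg, Values[1] from Val, and Values[2] from Agg again.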
2503
2504  SmallVector<EVT, 4> AggValueVTs;
2505  ComputeValueVTs(TLI, AggTy, AggValueVTs);
2506  SmallVector<EVT, 4> ValValueVTs;
2507  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2508
2509  unsigned NumAggValues = AggValueVTs.size();
2510  unsigned NumValValues = ValValueVTs.size();
2511  SmallVector<SDValue, 4> Values(NumAggValues);
2512
2513  SDValue Agg = getValue(Op0);
2514  SDValue Val = getValue(Op1);
2515  unsigned i = 0;
2516  // Copy the beginning value(s) from the original aggregate.
2517  for (; i != LinearIndex; ++i)
2518    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2519                SDValue(Agg.getNode(), Agg.getResNo() + i);
2520  // Copy values from the inserted value(s).
2521  for (; i != LinearIndex + NumValValues; ++i)
2522    Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2523                SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2524  // Copy remaining value(s) from the original aggregate.
2525  for (; i != NumAggValues; ++i)
2526    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2527                SDValue(Agg.getNode(), Agg.getResNo() + i);
2528
2529  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2530                           DAG.getVTList(&AggValueVTs[0], NumAggValues),
2531                           &Values[0], NumAggValues));
2532}
2533
2534void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) {
2535  const Value *Op0 = I.getOperand(0);
2536  const Type *AggTy = Op0->getType();
2537  const Type *ValTy = I.getType();
2538  bool OutOfUndef = isa<UndefValue>(Op0);
2539
2540  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2541                                            I.idx_begin(), I.idx_end());
2542
2543  SmallVector<EVT, 4> ValValueVTs;
2544  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2545
2546  unsigned NumValValues = ValValueVTs.size();
2547  SmallVector<SDValue, 4> Values(NumValValues);
2548
2549  SDValue Agg = getValue(Op0);
2550  // Copy out the selected value(s).
2551  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2552    Values[i - LinearIndex] =
2553      OutOfUndef ?
2554        DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2555        SDValue(Agg.getNode(), Agg.getResNo() + i);
2556
2557  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2558                           DAG.getVTList(&ValValueVTs[0], NumValValues),
2559                           &Values[0], NumValValues));
2560}
2561
2562void SelectionDAGBuilder::visitGetElementPtr(User &I) {
2563  SDValue N = getValue(I.getOperand(0));
2564  const Type *Ty = I.getOperand(0)->getType();
2565
2566  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2567       OI != E; ++OI) {
2568    Value *Idx = *OI;
2569    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2570      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2571      if (Field) {
2572        // N = N + Offset
2573        uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2574        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2575                        DAG.getIntPtrConstant(Offset));
2576      }
2577
2578      Ty = StTy->getElementType(Field);
2579    } else {
2580      Ty = cast<SequentialType>(Ty)->getElementType();
2581
2582      // If this is a constant subscript, handle it quickly.
2583      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2584        if (CI->getZExtValue() == 0) continue;
2585        uint64_t Offs =
2586            TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2587        SDValue OffsVal;
2588        EVT PTy = TLI.getPointerTy();
2589        unsigned PtrBits = PTy.getSizeInBits();
2590        if (PtrBits < 64)
2591          OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2592                                TLI.getPointerTy(),
2593                                DAG.getConstant(Offs, MVT::i64));
2594        else
2595          OffsVal = DAG.getIntPtrConstant(Offs);
2596
2597        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2598                        OffsVal);
2599        continue;
2600      }
2601
2602      // N = N + Idx * ElementSize;
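      // For example (illustrative, not from the original source),
      // "getelementptr i32* %p, i64 %i" becomes N = %p + (%i << 2) on a target
      // where i32 has an alloc size of 4 bytes, using the SHL fast path below
      // (with %i first sign-extended or truncated to the pointer type).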
2603      APInt ElementSize = APInt(TLI.getPointerTy().getSizeInBits(),
2604                                TD->getTypeAllocSize(Ty));
2605      SDValue IdxN = getValue(Idx);
2606
2607      // If the index is smaller or larger than intptr_t, truncate or extend
2608      // it.
2609      IdxN = DAG.getSExtOrTrunc(IdxN, getCurDebugLoc(), N.getValueType());
2610
2611      // If this is a multiply by a power of two, turn it into a shl
2612      // immediately.  This is a very common case.
2613      if (ElementSize != 1) {
2614        if (ElementSize.isPowerOf2()) {
2615          unsigned Amt = ElementSize.logBase2();
2616          IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2617                             N.getValueType(), IdxN,
2618                             DAG.getConstant(Amt, TLI.getPointerTy()));
2619        } else {
2620          SDValue Scale = DAG.getConstant(ElementSize, TLI.getPointerTy());
2621          IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2622                             N.getValueType(), IdxN, Scale);
2623        }
2624      }
2625
2626      N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2627                      N.getValueType(), N, IdxN);
2628    }
2629  }
2630
2631  setValue(&I, N);
2632}
2633
2634void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
2635  // If this is a fixed sized alloca in the entry block of the function,
2636  // allocate it statically on the stack.
2637  if (FuncInfo.StaticAllocaMap.count(&I))
2638    return;   // getValue will auto-populate this.
2639
2640  const Type *Ty = I.getAllocatedType();
2641  uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2642  unsigned Align =
2643    std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2644             I.getAlignment());
2645
2646  SDValue AllocSize = getValue(I.getArraySize());
2647
2648  AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2649                          AllocSize,
2650                          DAG.getConstant(TySize, AllocSize.getValueType()));
2651
2652  EVT IntPtr = TLI.getPointerTy();
2653  AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
2654
2655  // Handle alignment.  If the requested alignment is less than or equal to
2656  // the stack alignment, ignore it.  If the size is greater than or equal to
2657  // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2658  unsigned StackAlign =
2659    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2660  if (Align <= StackAlign)
2661    Align = 0;
2662
2663  // Round the size of the allocation up to the stack alignment size
2664  // by adding StackAlign-1 to the size.
2665  AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2666                          AllocSize.getValueType(), AllocSize,
2667                          DAG.getIntPtrConstant(StackAlign-1));
2668
2669  // Mask out the low bits for alignment purposes.
2670  AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2671                          AllocSize.getValueType(), AllocSize,
2672                          DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
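  // Illustrative example (not from the original source): with StackAlign == 16
  // and AllocSize == 40, the two nodes above compute (40 + 15) & ~15 == 48,
  // the next multiple of 16.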
2673
2674  SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2675  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2676  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2677                            VTs, Ops, 3);
2678  setValue(&I, DSA);
2679  DAG.setRoot(DSA.getValue(1));
2680
2681  // Inform the Frame Information that we have just allocated a variable-sized
2682  // object.
2683  FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
2684}
2685
2686void SelectionDAGBuilder::visitLoad(LoadInst &I) {
2687  const Value *SV = I.getOperand(0);
2688  SDValue Ptr = getValue(SV);
2689
2690  const Type *Ty = I.getType();
2691  bool isVolatile = I.isVolatile();
2692  unsigned Alignment = I.getAlignment();
2693
2694  SmallVector<EVT, 4> ValueVTs;
2695  SmallVector<uint64_t, 4> Offsets;
2696  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2697  unsigned NumValues = ValueVTs.size();
2698  if (NumValues == 0)
2699    return;
2700
2701  SDValue Root;
2702  bool ConstantMemory = false;
2703  if (I.isVolatile())
2704    // Serialize volatile loads with other side effects.
2705    Root = getRoot();
2706  else if (AA->pointsToConstantMemory(SV)) {
2707    // Do not serialize (non-volatile) loads of constant memory with anything.
2708    Root = DAG.getEntryNode();
2709    ConstantMemory = true;
2710  } else {
2711    // Do not serialize non-volatile loads against each other.
2712    Root = DAG.getRoot();
2713  }
2714
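  // An aggregate-typed load is split into one load per element at its byte
  // offset; the pieces are reassembled with MERGE_VALUES below.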
2715  SmallVector<SDValue, 4> Values(NumValues);
2716  SmallVector<SDValue, 4> Chains(NumValues);
2717  EVT PtrVT = Ptr.getValueType();
2718  for (unsigned i = 0; i != NumValues; ++i) {
2719    SDValue A = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2720                            PtrVT, Ptr,
2721                            DAG.getConstant(Offsets[i], PtrVT));
2722    SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2723                            A, SV, Offsets[i], isVolatile, Alignment);
2724
2725    Values[i] = L;
2726    Chains[i] = L.getValue(1);
2727  }
2728
2729  if (!ConstantMemory) {
2730    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2731                                MVT::Other, &Chains[0], NumValues);
2732    if (isVolatile)
2733      DAG.setRoot(Chain);
2734    else
2735      PendingLoads.push_back(Chain);
2736  }
2737
2738  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2739                           DAG.getVTList(&ValueVTs[0], NumValues),
2740                           &Values[0], NumValues));
2741}
2742
2743void SelectionDAGBuilder::visitStore(StoreInst &I) {
2744  Value *SrcV = I.getOperand(0);
2745  Value *PtrV = I.getOperand(1);
2746
2747  SmallVector<EVT, 4> ValueVTs;
2748  SmallVector<uint64_t, 4> Offsets;
2749  ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2750  unsigned NumValues = ValueVTs.size();
2751  if (NumValues == 0)
2752    return;
2753
2754  // Get the lowered operands. Note that we do this after
2755  // checking if NumValues is zero, because with zero values
2756  // the operands won't have entries in the map.
2757  SDValue Src = getValue(SrcV);
2758  SDValue Ptr = getValue(PtrV);
2759
2760  SDValue Root = getRoot();
2761  SmallVector<SDValue, 4> Chains(NumValues);
2762  EVT PtrVT = Ptr.getValueType();
2763  bool isVolatile = I.isVolatile();
2764  unsigned Alignment = I.getAlignment();
2765
2766  for (unsigned i = 0; i != NumValues; ++i) {
2767    SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, Ptr,
2768                              DAG.getConstant(Offsets[i], PtrVT));
2769    Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2770                             SDValue(Src.getNode(), Src.getResNo() + i),
2771                             Add, PtrV, Offsets[i], isVolatile, Alignment);
2772  }
2773
2774  DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2775                          MVT::Other, &Chains[0], NumValues));
2776}
2777
2778/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
2779/// node.
2780void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I,
2781                                               unsigned Intrinsic) {
2782  bool HasChain = !I.doesNotAccessMemory();
2783  bool OnlyLoad = HasChain && I.onlyReadsMemory();
2784
2785  // Build the operand list.
2786  SmallVector<SDValue, 8> Ops;
2787  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
2788    if (OnlyLoad) {
2789      // We don't need to serialize loads against other loads.
2790      Ops.push_back(DAG.getRoot());
2791    } else {
2792      Ops.push_back(getRoot());
2793    }
2794  }
2795
2796  // Info is set by getTgtMemIntrinsic.
2797  TargetLowering::IntrinsicInfo Info;
2798  bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2799
2800  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2801  if (!IsTgtIntrinsic)
2802    Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2803
2804  // Add all operands of the call to the operand list.
2805  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2806    SDValue Op = getValue(I.getOperand(i));
2807    assert(TLI.isTypeLegal(Op.getValueType()) &&
2808           "Intrinsic uses a non-legal type?");
2809    Ops.push_back(Op);
2810  }
2811
2812  SmallVector<EVT, 4> ValueVTs;
2813  ComputeValueVTs(TLI, I.getType(), ValueVTs);
2814#ifndef NDEBUG
2815  for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
2816    assert(TLI.isTypeLegal(ValueVTs[Val]) &&
2817           "Intrinsic uses a non-legal type?");
2818  }
2819#endif // NDEBUG
2820
2821  if (HasChain)
2822    ValueVTs.push_back(MVT::Other);
2823
2824  SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
2825
2826  // Create the node.
2827  SDValue Result;
2828  if (IsTgtIntrinsic) {
2829    // This is a target intrinsic that touches memory.
2830    Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2831                                     VTs, &Ops[0], Ops.size(),
2832                                     Info.memVT, Info.ptrVal, Info.offset,
2833                                     Info.align, Info.vol,
2834                                     Info.readMem, Info.writeMem);
2835  } else if (!HasChain) {
2836    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2837                         VTs, &Ops[0], Ops.size());
2838  } else if (!I.getType()->isVoidTy()) {
2839    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2840                         VTs, &Ops[0], Ops.size());
2841  } else {
2842    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2843                         VTs, &Ops[0], Ops.size());
2844  }
2845
2846  if (HasChain) {
2847    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2848    if (OnlyLoad)
2849      PendingLoads.push_back(Chain);
2850    else
2851      DAG.setRoot(Chain);
2852  }
2853
2854  if (!I.getType()->isVoidTy()) {
2855    if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2856      EVT VT = TLI.getValueType(PTy);
2857      Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2858    }
2859
2860    setValue(&I, Result);
2861  }
2862}
2863
2864/// GetSignificand - Get the significand and build it into a floating-point
2865/// number with exponent of 1:
2866///
2867///   Op = (Op & 0x007fffff) | 0x3f800000;
2868///
2869/// where Op is the i32 bit representation of the floating-point value.
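///
/// For example, 6.0f has the bit pattern 0x40C00000; masking and OR-ing give
/// 0x3FC00000 == 1.5f, and GetExponent below recovers the exponent
/// ((0x40C00000 & 0x7f800000) >> 23) - 127 == 2, so 6.0 == 1.5 * 2^2.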
2870static SDValue
2871GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl, unsigned Order) {
2872  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
2873                           DAG.getConstant(0x007fffff, MVT::i32));
2874  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
2875                           DAG.getConstant(0x3f800000, MVT::i32));
2876  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
2877}
2878
2879/// GetExponent - Get the exponent:
2880///
2881///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
2882///
2883/// where Op is the i32 bit representation of the floating-point value.
2884static SDValue
2885GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
2886            DebugLoc dl, unsigned Order) {
2887  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
2888                           DAG.getConstant(0x7f800000, MVT::i32));
2889  SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
2890                           DAG.getConstant(23, TLI.getPointerTy()));
2891  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
2892                           DAG.getConstant(127, MVT::i32));
2893  return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
2894}
2895
2896/// getF32Constant - Get a 32-bit floating-point constant.
2897static SDValue
2898getF32Constant(SelectionDAG &DAG, unsigned Flt) {
2899  return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
2900}
2901
2902/// implVisitBinaryAtomic - Utility used by visitIntrinsicCall to lower
2903/// binary atomic intrinsics. I is the call instruction and Op is the
2904/// associated ISD::NodeType.
2905const char *
2906SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
2907  SDValue Root = getRoot();
2908  SDValue L =
2909    DAG.getAtomic(Op, getCurDebugLoc(),
2910                  getValue(I.getOperand(2)).getValueType().getSimpleVT(),
2911                  Root,
2912                  getValue(I.getOperand(1)),
2913                  getValue(I.getOperand(2)),
2914                  I.getOperand(1));
2915  setValue(&I, L);
2916  DAG.setRoot(L.getValue(1));
2917  return 0;
2918}
2919
2920// implVisitAluOverflow - Lower arithmetic-overflow intrinsics.
2921const char *
2922SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
2923  SDValue Op1 = getValue(I.getOperand(1));
2924  SDValue Op2 = getValue(I.getOperand(2));
2925
2926  SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
2927  setValue(&I, DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2));
2928  return 0;
2929}
2930
2931/// visitExp - Lower an exp intrinsic. Handles the special sequences for
2932/// limited-precision mode.
2933void
2934SelectionDAGBuilder::visitExp(CallInst &I) {
2935  SDValue result;
2936  DebugLoc dl = getCurDebugLoc();
2937
2938  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
2939      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
2940    SDValue Op = getValue(I.getOperand(1));
2941
2942    // Put the exponent in the right bit position for later addition to the
2943    // final result:
2944    //
2945    //   #define LOG2OFe 1.4426950f
2946    //   IntegerPartOfX = ((int32_t)(X * LOG2OFe));
2947    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
2948                             getF32Constant(DAG, 0x3fb8aa3b));
2949    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
2950
2951    //   FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
2952    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
2953    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
2954
2955    //   IntegerPartOfX <<= 23;
2956    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
2957                                 DAG.getConstant(23, TLI.getPointerTy()));
2958
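    // Shifting by 23 moves IntegerPartOfX into the IEEE-754 single-precision
    // exponent field, so the integer ADD performed below scales
    // 2^FractionalPartOfX (a value in [1,2)) by 2^IntegerPartOfX.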
2959    if (LimitFloatPrecision <= 6) {
2960      // For floating-point precision of 6:
2961      //
2962      //   TwoToFractionalPartOfX =
2963      //     0.997535578f +
2964      //       (0.735607626f + 0.252464424f * x) * x;
2965      //
2966      // error 0.0144103317, which is 6 bits
2967      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
2968                               getF32Constant(DAG, 0x3e814304));
2969      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
2970                               getF32Constant(DAG, 0x3f3c50c8));
2971      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
2972      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
2973                               getF32Constant(DAG, 0x3f7f5e7e));
2974      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
2975
2976      // Add the exponent into the result in integer domain.
2977      SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
2978                               TwoToFracPartOfX, IntegerPartOfX);
2979
2980      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
2981    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
2982      // For floating-point precision of 12:
2983      //
2984      //   TwoToFractionalPartOfX =
2985      //     0.999892986f +
2986      //       (0.696457318f +
2987      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
2988      //
2989      // 0.000107046256 error, which is 13 to 14 bits
2990      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
2991                               getF32Constant(DAG, 0x3da235e3));
2992      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
2993                               getF32Constant(DAG, 0x3e65b8f3));
2994      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
2995      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
2996                               getF32Constant(DAG, 0x3f324b07));
2997      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
2998      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
2999                               getF32Constant(DAG, 0x3f7ff8fd));
3000      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3001
3002      // Add the exponent into the result in integer domain.
3003      SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3004                               TwoToFracPartOfX, IntegerPartOfX);
3005
3006      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3007    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3008      // For floating-point precision of 18:
3009      //
3010      //   TwoToFractionalPartOfX =
3011      //     0.999999982f +
3012      //       (0.693148872f +
3013      //         (0.240227044f +
3014      //           (0.554906021e-1f +
3015      //             (0.961591928e-2f +
3016      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3017      //
3018      // error 2.47208000*10^(-7), which is better than 18 bits
3019      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3020                               getF32Constant(DAG, 0x3924b03e));
3021      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3022                               getF32Constant(DAG, 0x3ab24b87));
3023      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3024      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3025                               getF32Constant(DAG, 0x3c1d8c17));
3026      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3027      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3028                               getF32Constant(DAG, 0x3d634a1d));
3029      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3030      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3031                               getF32Constant(DAG, 0x3e75fe14));
3032      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3033      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3034                                getF32Constant(DAG, 0x3f317234));
3035      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3036      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3037                                getF32Constant(DAG, 0x3f800000));
3038      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3039                                             MVT::i32, t13);
3040
3041      // Add the exponent into the result in integer domain.
3042      SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3043                                TwoToFracPartOfX, IntegerPartOfX);
3044
3045      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3046    }
3047  } else {
3048    // No special expansion.
3049    result = DAG.getNode(ISD::FEXP, dl,
3050                         getValue(I.getOperand(1)).getValueType(),
3051                         getValue(I.getOperand(1)));
3052  }
3053
3054  setValue(&I, result);
3055}
3056
3057/// visitLog - Lower a log intrinsic. Handles the special sequences for
3058/// limited-precision mode.
3059void
3060SelectionDAGBuilder::visitLog(CallInst &I) {
3061  SDValue result;
3062  DebugLoc dl = getCurDebugLoc();
3063
3064  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3065      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3066    SDValue Op = getValue(I.getOperand(1));
3067    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3068
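    // With x == M * 2^E and M in [1,2), log(x) = E*log(2) + log(M); E comes
    // from GetExponent and log(M) from the minimax polynomials chosen below.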
3069    // Scale the exponent by log(2) [0.69314718f].
3070    SDValue Exp = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder);
3071    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3072                                        getF32Constant(DAG, 0x3f317218));
3073
3074    // Get the significand and build it into a floating-point number with
3075    // exponent of 1.
3076    SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder);
3077
3078    if (LimitFloatPrecision <= 6) {
3079      // For floating-point precision of 6:
3080      //
3081      //   LogofMantissa =
3082      //     -1.1609546f +
3083      //       (1.4034025f - 0.23903021f * x) * x;
3084      //
3085      // error 0.0034276066, which is better than 8 bits
3086      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3087                               getF32Constant(DAG, 0xbe74c456));
3088      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3089                               getF32Constant(DAG, 0x3fb3a2b1));
3090      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3091      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3092                                          getF32Constant(DAG, 0x3f949a29));
3093
3094      result = DAG.getNode(ISD::FADD, dl,
3095                           MVT::f32, LogOfExponent, LogOfMantissa);
3096    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3097      // For floating-point precision of 12:
3098      //
3099      //   LogOfMantissa =
3100      //     -1.7417939f +
3101      //       (2.8212026f +
3102      //         (-1.4699568f +
3103      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3104      //
3105      // error 0.000061011436, which is 14 bits
3106      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3107                               getF32Constant(DAG, 0xbd67b6d6));
3108      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3109                               getF32Constant(DAG, 0x3ee4f4b8));
3110      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3111      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3112                               getF32Constant(DAG, 0x3fbc278b));
3113      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3114      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3115                               getF32Constant(DAG, 0x40348e95));
3116      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3117      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3118                                          getF32Constant(DAG, 0x3fdef31a));
3119
3120      result = DAG.getNode(ISD::FADD, dl,
3121                           MVT::f32, LogOfExponent, LogOfMantissa);
3122    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3123      // For floating-point precision of 18:
3124      //
3125      //   LogOfMantissa =
3126      //     -2.1072184f +
3127      //       (4.2372794f +
3128      //         (-3.7029485f +
3129      //           (2.2781945f +
3130      //             (-0.87823314f +
3131      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3132      //
3133      // error 0.0000023660568, which is better than 18 bits
3134      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3135                               getF32Constant(DAG, 0xbc91e5ac));
3136      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3137                               getF32Constant(DAG, 0x3e4350aa));
3138      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3139      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3140                               getF32Constant(DAG, 0x3f60d3e3));
3141      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3142      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3143                               getF32Constant(DAG, 0x4011cdf0));
3144      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3145      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3146                               getF32Constant(DAG, 0x406cfd1c));
3147      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3148      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3149                               getF32Constant(DAG, 0x408797cb));
3150      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3151      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3152                                          getF32Constant(DAG, 0x4006dcab));
3153
3154      result = DAG.getNode(ISD::FADD, dl,
3155                           MVT::f32, LogOfExponent, LogOfMantissa);
3156    }
3157  } else {
3158    // No special expansion.
3159    result = DAG.getNode(ISD::FLOG, dl,
3160                         getValue(I.getOperand(1)).getValueType(),
3161                         getValue(I.getOperand(1)));
3162  }
3163
3164  setValue(&I, result);
3165}
3166
3167/// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3168/// limited-precision mode.
3169void
3170SelectionDAGBuilder::visitLog2(CallInst &I) {
3171  SDValue result;
3172  DebugLoc dl = getCurDebugLoc();
3173
3174  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3175      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3176    SDValue Op = getValue(I.getOperand(1));
3177    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3178
3179    // Get the exponent.
3180    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder);
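    // No scaling of the exponent is needed here: log2(M * 2^E) = E + log2(M).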
3181
3182    // Get the significand and build it into a floating-point number with
3183    // exponent of 1.
3184    SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder);
3185
3186    // Different possible minimax approximations of significand in
3187    // floating-point for various degrees of accuracy over [1,2].
3188    if (LimitFloatPrecision <= 6) {
3189      // For floating-point precision of 6:
3190      //
3191      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3192      //
3193      // error 0.0049451742, which is more than 7 bits
3194      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3195                               getF32Constant(DAG, 0xbeb08fe0));
3196      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3197                               getF32Constant(DAG, 0x40019463));
3198      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3199      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3200                                           getF32Constant(DAG, 0x3fd6633d));
3201
3202      result = DAG.getNode(ISD::FADD, dl,
3203                           MVT::f32, LogOfExponent, Log2ofMantissa);
3204    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3205      // For floating-point precision of 12:
3206      //
3207      //   Log2ofMantissa =
3208      //     -2.51285454f +
3209      //       (4.07009056f +
3210      //         (-2.12067489f +
3211      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3212      //
3213      // error 0.0000876136000, which is better than 13 bits
3214      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3215                               getF32Constant(DAG, 0xbda7262e));
3216      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3217                               getF32Constant(DAG, 0x3f25280b));
3218      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3219      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3220                               getF32Constant(DAG, 0x4007b923));
3221      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3222      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3223                               getF32Constant(DAG, 0x40823e2f));
3224      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3225      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3226                                           getF32Constant(DAG, 0x4020d29c));
3227
3228      result = DAG.getNode(ISD::FADD, dl,
3229                           MVT::f32, LogOfExponent, Log2ofMantissa);
3230    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3231      // For floating-point precision of 18:
3232      //
3233      //   Log2ofMantissa =
3234      //     -3.0400495f +
3235      //       (6.1129976f +
3236      //         (-5.3420409f +
3237      //           (3.2865683f +
3238      //             (-1.2669343f +
3239      //               (0.27515199f -
3240      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3241      //
3242      // error 0.0000018516, which is better than 18 bits
3243      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3244                               getF32Constant(DAG, 0xbcd2769e));
3245      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3246                               getF32Constant(DAG, 0x3e8ce0b9));
3247      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3248      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3249                               getF32Constant(DAG, 0x3fa22ae7));
3250      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3251      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3252                               getF32Constant(DAG, 0x40525723));
3253      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3254      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3255                               getF32Constant(DAG, 0x40aaf200));
3256      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3257      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3258                               getF32Constant(DAG, 0x40c39dad));
3259      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3260      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3261                                           getF32Constant(DAG, 0x4042902c));
3262
3263      result = DAG.getNode(ISD::FADD, dl,
3264                           MVT::f32, LogOfExponent, Log2ofMantissa);
3265    }
3266  } else {
3267    // No special expansion.
3268    result = DAG.getNode(ISD::FLOG2, dl,
3269                         getValue(I.getOperand(1)).getValueType(),
3270                         getValue(I.getOperand(1)));
3271  }
3272
3273  setValue(&I, result);
3274}
3275
3276/// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3277/// limited-precision mode.
3278void
3279SelectionDAGBuilder::visitLog10(CallInst &I) {
3280  SDValue result;
3281  DebugLoc dl = getCurDebugLoc();
3282
3283  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3284      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3285    SDValue Op = getValue(I.getOperand(1));
3286    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3287
3288    // Scale the exponent by log10(2) [0.30102999f].
3289    SDValue Exp = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder);
3290    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3291                                        getF32Constant(DAG, 0x3e9a209a));
3292
3293    // Get the significand and build it into a floating-point number with
3294    // exponent of 1.
3295    SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder);
3296
3297    if (LimitFloatPrecision <= 6) {
3298      // For floating-point precision of 6:
3299      //
3300      //   Log10ofMantissa =
3301      //     -0.50419619f +
3302      //       (0.60948995f - 0.10380950f * x) * x;
3303      //
3304      // error 0.0014886165, which is 6 bits
3305      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3306                               getF32Constant(DAG, 0xbdd49a13));
3307      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3308                               getF32Constant(DAG, 0x3f1c0789));
3309      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3310      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3311                                            getF32Constant(DAG, 0x3f011300));
3312
3313      result = DAG.getNode(ISD::FADD, dl,
3314                           MVT::f32, LogOfExponent, Log10ofMantissa);
3315    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3316      // For floating-point precision of 12:
3317      //
3318      //   Log10ofMantissa =
3319      //     -0.64831180f +
3320      //       (0.91751397f +
3321      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3322      //
3323      // error 0.00019228036, which is better than 12 bits
3324      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3325                               getF32Constant(DAG, 0x3d431f31));
3326      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3327                               getF32Constant(DAG, 0x3ea21fb2));
3328      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3329      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3330                               getF32Constant(DAG, 0x3f6ae232));
3331      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3332      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3333                                            getF32Constant(DAG, 0x3f25f7c3));
3334
3335      result = DAG.getNode(ISD::FADD, dl,
3336                           MVT::f32, LogOfExponent, Log10ofMantissa);
3337    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3338      // For floating-point precision of 18:
3339      //
3340      //   Log10ofMantissa =
3341      //     -0.84299375f +
3342      //       (1.5327582f +
3343      //         (-1.0688956f +
3344      //           (0.49102474f +
3345      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3346      //
3347      // error 0.0000037995730, which is better than 18 bits
3348      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3349                               getF32Constant(DAG, 0x3c5d51ce));
3350      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3351                               getF32Constant(DAG, 0x3e00685a));
3352      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3353      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3354                               getF32Constant(DAG, 0x3efb6798));
3355      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3356      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3357                               getF32Constant(DAG, 0x3f88d192));
3358      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3359      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3360                               getF32Constant(DAG, 0x3fc4316c));
3361      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3362      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3363                                            getF32Constant(DAG, 0x3f57ce70));
3364
3365      result = DAG.getNode(ISD::FADD, dl,
3366                           MVT::f32, LogOfExponent, Log10ofMantissa);
3367    }
3368  } else {
3369    // No special expansion.
3370    result = DAG.getNode(ISD::FLOG10, dl,
3371                         getValue(I.getOperand(1)).getValueType(),
3372                         getValue(I.getOperand(1)));
3373  }
3374
3375  setValue(&I, result);
3376}
3377
3378/// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3379/// limited-precision mode.
3380void
3381SelectionDAGBuilder::visitExp2(CallInst &I) {
3382  SDValue result;
3383  DebugLoc dl = getCurDebugLoc();
3384
3385  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3386      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3387    SDValue Op = getValue(I.getOperand(1));
3388
3389    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3390
3391    //   FractionalPartOfX = x - (float)IntegerPartOfX;
3392    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3393    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3394
3395    //   IntegerPartOfX <<= 23;
3396    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3397                                 DAG.getConstant(23, TLI.getPointerTy()));
3398
3399    if (LimitFloatPrecision <= 6) {
3400      // For floating-point precision of 6:
3401      //
3402      //   TwoToFractionalPartOfX =
3403      //     0.997535578f +
3404      //       (0.735607626f + 0.252464424f * x) * x;
3405      //
3406      // error 0.0144103317, which is 6 bits
3407      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3408                               getF32Constant(DAG, 0x3e814304));
3409      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3410                               getF32Constant(DAG, 0x3f3c50c8));
3411      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3412      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3413                               getF32Constant(DAG, 0x3f7f5e7e));
3414      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3415      SDValue TwoToFractionalPartOfX =
3416        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3417
3418      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3419                           MVT::f32, TwoToFractionalPartOfX);
3420    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3421      // For floating-point precision of 12:
3422      //
3423      //   TwoToFractionalPartOfX =
3424      //     0.999892986f +
3425      //       (0.696457318f +
3426      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3427      //
3428      // error 0.000107046256, which is 13 to 14 bits
3429      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3430                               getF32Constant(DAG, 0x3da235e3));
3431      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3432                               getF32Constant(DAG, 0x3e65b8f3));
3433      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3434      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3435                               getF32Constant(DAG, 0x3f324b07));
3436      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3437      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3438                               getF32Constant(DAG, 0x3f7ff8fd));
3439      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3440      SDValue TwoToFractionalPartOfX =
3441        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3442
3443      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3444                           MVT::f32, TwoToFractionalPartOfX);
3445    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3446      // For floating-point precision of 18:
3447      //
3448      //   TwoToFractionalPartOfX =
3449      //     0.999999982f +
3450      //       (0.693148872f +
3451      //         (0.240227044f +
3452      //           (0.554906021e-1f +
3453      //             (0.961591928e-2f +
3454      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3455      // error 2.47208000*10^(-7), which is better than 18 bits
3456      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3457                               getF32Constant(DAG, 0x3924b03e));
3458      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3459                               getF32Constant(DAG, 0x3ab24b87));
3460      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3461      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3462                               getF32Constant(DAG, 0x3c1d8c17));
3463      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3464      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3465                               getF32Constant(DAG, 0x3d634a1d));
3466      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3467      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3468                               getF32Constant(DAG, 0x3e75fe14));
3469      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3470      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3471                                getF32Constant(DAG, 0x3f317234));
3472      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3473      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3474                                getF32Constant(DAG, 0x3f800000));
3475      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3476      SDValue TwoToFractionalPartOfX =
3477        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3478
3479      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3480                           MVT::f32, TwoToFractionalPartOfX);
3481    }
3482  } else {
3483    // No special expansion.
3484    result = DAG.getNode(ISD::FEXP2, dl,
3485                         getValue(I.getOperand(1)).getValueType(),
3486                         getValue(I.getOperand(1)));
3487  }
3488
3489  setValue(&I, result);
3490}
3491
3492/// visitPow - Lower a pow intrinsic. Handles the special sequences for
3493/// limited-precision mode when the base is 10.0f.
3494void
3495SelectionDAGBuilder::visitPow(CallInst &I) {
3496  SDValue result;
3497  Value *Val = I.getOperand(1);
3498  DebugLoc dl = getCurDebugLoc();
3499  bool IsExp10 = false;
3500
3501  if (getValue(Val).getValueType() == MVT::f32 &&
3502      getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3503      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3504    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3505      if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3506        APFloat Ten(10.0f);
3507        IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3508      }
3509    }
3510  }
3511
3512  if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3513    SDValue Op = getValue(I.getOperand(2));
3514
3515    // Put the exponent in the right bit position for later addition to the
3516    // final result:
3517    //
3518    //   #define LOG2OF10 3.3219281f
3519    //   IntegerPartOfX = (int32_t)(x * LOG2OF10);
3520    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3521                             getF32Constant(DAG, 0x40549a78));
3522    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3523
3524    //   FractionalPartOfX = (x * LOG2OF10) - (float)IntegerPartOfX;
3525    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3526    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3527
3528    //   IntegerPartOfX <<= 23;
3529    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3530                                 DAG.getConstant(23, TLI.getPointerTy()));
3531
3532    if (LimitFloatPrecision <= 6) {
3533      // For floating-point precision of 6:
3534      //
3535      //   TwoToFractionalPartOfX =
3536      //     0.997535578f +
3537      //       (0.735607626f + 0.252464424f * x) * x;
3538      //
3539      // error 0.0144103317, which is 6 bits
3540      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3541                               getF32Constant(DAG, 0x3e814304));
3542      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3543                               getF32Constant(DAG, 0x3f3c50c8));
3544      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3545      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3546                               getF32Constant(DAG, 0x3f7f5e7e));
3547      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3548      SDValue TwoToFractionalPartOfX =
3549        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3550
3551      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3552                           MVT::f32, TwoToFractionalPartOfX);
3553    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3554      // For floating-point precision of 12:
3555      //
3556      //   TwoToFractionalPartOfX =
3557      //     0.999892986f +
3558      //       (0.696457318f +
3559      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3560      //
3561      // error 0.000107046256, which is 13 to 14 bits
3562      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3563                               getF32Constant(DAG, 0x3da235e3));
3564      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3565                               getF32Constant(DAG, 0x3e65b8f3));
3566      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3567      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3568                               getF32Constant(DAG, 0x3f324b07));
3569      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3570      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3571                               getF32Constant(DAG, 0x3f7ff8fd));
3572      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3573      SDValue TwoToFractionalPartOfX =
3574        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3575
3576      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3577                           MVT::f32, TwoToFractionalPartOfX);
3578    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3579      // For floating-point precision of 18:
3580      //
3581      //   TwoToFractionalPartOfX =
3582      //     0.999999982f +
3583      //       (0.693148872f +
3584      //         (0.240227044f +
3585      //           (0.554906021e-1f +
3586      //             (0.961591928e-2f +
3587      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3588      // error 2.47208000*10^(-7), which is better than 18 bits
3589      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3590                               getF32Constant(DAG, 0x3924b03e));
3591      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3592                               getF32Constant(DAG, 0x3ab24b87));
3593      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3594      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3595                               getF32Constant(DAG, 0x3c1d8c17));
3596      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3597      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3598                               getF32Constant(DAG, 0x3d634a1d));
3599      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3600      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3601                               getF32Constant(DAG, 0x3e75fe14));
3602      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3603      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3604                                getF32Constant(DAG, 0x3f317234));
3605      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3606      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3607                                getF32Constant(DAG, 0x3f800000));
3608      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3609      SDValue TwoToFractionalPartOfX =
3610        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3611
3612      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3613                           MVT::f32, TwoToFractionalPartOfX);
3614    }
3615  } else {
3616    // No special expansion.
3617    result = DAG.getNode(ISD::FPOW, dl,
3618                         getValue(I.getOperand(1)).getValueType(),
3619                         getValue(I.getOperand(1)),
3620                         getValue(I.getOperand(2)));
3621  }
3622
3623  setValue(&I, result);
3624}
3625
3626
3627/// ExpandPowI - Expand a llvm.powi intrinsic.
3628static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
3629                          SelectionDAG &DAG) {
3630  // If RHS is a constant, we can expand this out to a multiplication tree,
3631  // otherwise we end up lowering to a call to __powidf2 (for example).  When
3632  // optimizing for size, we only want to do this if the expansion would produce
3633  // a small number of multiplies; otherwise we fall back to the FPOWI libcall.
3634  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3635    // Get the exponent as a positive value.
3636    unsigned Val = RHSC->getSExtValue();
3637    if ((int)Val < 0) Val = -Val;
3638
3639    // powi(x, 0) -> 1.0
3640    if (Val == 0)
3641      return DAG.getConstantFP(1.0, LHS.getValueType());
3642
3643    Function *F = DAG.getMachineFunction().getFunction();
3644    if (!F->hasFnAttr(Attribute::OptimizeForSize) ||
3645        // If optimizing for size, don't insert too many multiplies.  This
3646        // inserts up to 5 multiplies.
3647        CountPopulation_32(Val)+Log2_32(Val) < 7) {
3648      // We use the simple binary decomposition method to generate the multiply
3649      // sequence.  There are more optimal ways to do this (for example,
3650      // powi(x,15) generates one more multiply than it should), but this has
3651      // the benefit of being both really simple and much better than a libcall.
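      // For example, powi(x, 11): 11 == 0b1011, so Res accumulates
      // x * x^2 * x^8 from the successive squarings of x.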
3652      SDValue Res;  // Logically starts equal to 1.0
3653      SDValue CurSquare = LHS;
3654      while (Val) {
3655        if (Val & 1) {
3656          if (Res.getNode())
3657            Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
3658          else
3659            Res = CurSquare;  // 1.0*CurSquare.
3660        }
3661
3662        CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
3663                                CurSquare, CurSquare);
3664        Val >>= 1;
3665      }
3666
3667      // If the original was negative, invert the result, producing 1/(x*x*x).
3668      if (RHSC->getSExtValue() < 0)
3669        Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
3670                          DAG.getConstantFP(1.0, LHS.getValueType()), Res);
3671      return Res;
3672    }
3673  }
3674
3675  // Otherwise, expand to a libcall.
3676  return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
3677}
3678
3679
3680/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
3681/// we want to emit this as a call to a named external function, return the
3682/// name; otherwise lower it and return null.
3683const char *
3684SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3685  DebugLoc dl = getCurDebugLoc();
3686  SDValue Res;
3687
3688  switch (Intrinsic) {
3689  default:
3690    // By default, turn this into a target intrinsic node.
3691    visitTargetIntrinsic(I, Intrinsic);
3692    return 0;
3693  case Intrinsic::vastart:  visitVAStart(I); return 0;
3694  case Intrinsic::vaend:    visitVAEnd(I); return 0;
3695  case Intrinsic::vacopy:   visitVACopy(I); return 0;
3696  case Intrinsic::returnaddress:
3697    setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3698                             getValue(I.getOperand(1))));
3699    return 0;
3700  case Intrinsic::frameaddress:
3701    setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3702                             getValue(I.getOperand(1))));
3703    return 0;
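  // For setjmp/longjmp, the "+ !..." below skips the leading '_' in the string
  // literal when the target does not use an underscore-prefixed name.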
3704  case Intrinsic::setjmp:
3705    return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3706  case Intrinsic::longjmp:
3707    return "_longjmp"+!TLI.usesUnderscoreLongJmp();
3708  case Intrinsic::memcpy: {
3709    SDValue Op1 = getValue(I.getOperand(1));
3710    SDValue Op2 = getValue(I.getOperand(2));
3711    SDValue Op3 = getValue(I.getOperand(3));
3712    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3713    DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3714                              I.getOperand(1), 0, I.getOperand(2), 0));
3715    return 0;
3716  }
3717  case Intrinsic::memset: {
3718    SDValue Op1 = getValue(I.getOperand(1));
3719    SDValue Op2 = getValue(I.getOperand(2));
3720    SDValue Op3 = getValue(I.getOperand(3));
3721    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3722    DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
3723                              I.getOperand(1), 0));
3724    return 0;
3725  }
3726  case Intrinsic::memmove: {
3727    SDValue Op1 = getValue(I.getOperand(1));
3728    SDValue Op2 = getValue(I.getOperand(2));
3729    SDValue Op3 = getValue(I.getOperand(3));
3730    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3731
3732    // If the source and destination are known to not be aliases, we can
3733    // lower memmove as memcpy.
3734    uint64_t Size = -1ULL;
3735    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3736      Size = C->getZExtValue();
3737    if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3738        AliasAnalysis::NoAlias) {
3739      DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3740                                I.getOperand(1), 0, I.getOperand(2), 0));
3741      return 0;
3742    }
3743
3744    DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
3745                               I.getOperand(1), 0, I.getOperand(2), 0));
3746    return 0;
3747  }
3748  case Intrinsic::dbg_declare: {
3749    // FIXME: currently, we get here only if OptLevel != CodeGenOpt::None.
3750    // The real handling of this intrinsic is in FastISel.
3751    if (OptLevel != CodeGenOpt::None)
3752      // FIXME: Variable debug info is not supported here.
3753      return 0;
3754    DwarfWriter *DW = DAG.getDwarfWriter();
3755    if (!DW)
3756      return 0;
3757    DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3758    if (!DIDescriptor::ValidDebugInfo(DI.getVariable(), CodeGenOpt::None))
3759      return 0;
3760
3761    MDNode *Variable = DI.getVariable();
3762    Value *Address = DI.getAddress();
3763    if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
3764      Address = BCI->getOperand(0);
3765    AllocaInst *AI = dyn_cast<AllocaInst>(Address);
3766    // Don't handle byval struct arguments or VLAs, for example.
3767    if (!AI)
3768      return 0;
3769    DenseMap<const AllocaInst*, int>::iterator SI =
3770      FuncInfo.StaticAllocaMap.find(AI);
3771    if (SI == FuncInfo.StaticAllocaMap.end())
3772      return 0; // VLAs.
3773    int FI = SI->second;
3774
3775    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo())
3776      if (MDNode *Dbg = DI.getMetadata("dbg"))
3777        MMI->setVariableDbgInfo(Variable, FI, Dbg);
3778    return 0;
3779  }
3780  case Intrinsic::dbg_value: {
3781    // FIXME: currently, we get here only if OptLevel != CodeGenOpt::None.
3782    // The real handling of this intrinsic is in FastISel.
3783    if (OptLevel != CodeGenOpt::None)
3784      // FIXME: Variable debug info is not supported here.
3785      return 0;
3786    DwarfWriter *DW = DAG.getDwarfWriter();
3787    if (!DW)
3788      return 0;
3789    DbgValueInst &DI = cast<DbgValueInst>(I);
3790    if (!DIDescriptor::ValidDebugInfo(DI.getVariable(), CodeGenOpt::None))
3791      return 0;
3792
3793    MDNode *Variable = DI.getVariable();
3794    Value *V = DI.getValue();
3795    if (!V)
3796      return 0;
3797    if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
3798      V = BCI->getOperand(0);
3799    AllocaInst *AI = dyn_cast<AllocaInst>(V);
3800    // Don't handle byval struct arguments or VLAs, for example.
3801    if (!AI)
3802      return 0;
3803    DenseMap<const AllocaInst*, int>::iterator SI =
3804      FuncInfo.StaticAllocaMap.find(AI);
3805    if (SI == FuncInfo.StaticAllocaMap.end())
3806      return 0; // VLAs.
3807    int FI = SI->second;
3808    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo())
3809      if (MDNode *Dbg = DI.getMetadata("dbg"))
3810        MMI->setVariableDbgInfo(Variable, FI, Dbg);
3811    return 0;
3812  }
3813  case Intrinsic::eh_exception: {
3814    // Insert the EXCEPTIONADDR instruction.
3815    assert(CurMBB->isLandingPad() &&"Call to eh.exception not in landing pad!");
3816    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3817    SDValue Ops[1];
3818    Ops[0] = DAG.getRoot();
3819    SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
3820    setValue(&I, Op);
3821    DAG.setRoot(Op.getValue(1));
3822    return 0;
3823  }
3824
3825  case Intrinsic::eh_selector: {
3826    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3827
3828    if (CurMBB->isLandingPad())
3829      AddCatchInfo(I, MMI, CurMBB);
3830    else {
3831#ifndef NDEBUG
3832      FuncInfo.CatchInfoLost.insert(&I);
3833#endif
3834      // FIXME: Mark exception selector register as live in.  Hack for PR1508.
3835      unsigned Reg = TLI.getExceptionSelectorRegister();
3836      if (Reg) CurMBB->addLiveIn(Reg);
3837    }
3838
3839    // Insert the EHSELECTION instruction.
3840    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3841    SDValue Ops[2];
3842    Ops[0] = getValue(I.getOperand(1));
3843    Ops[1] = getRoot();
3844    SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
3845    DAG.setRoot(Op.getValue(1));
3846    setValue(&I, DAG.getSExtOrTrunc(Op, dl, MVT::i32));
3847    return 0;
3848  }
3849
3850  case Intrinsic::eh_typeid_for: {
3851    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3852
3853    if (MMI) {
3854      // Find the type id for the given typeinfo.
3855      GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
3856      unsigned TypeID = MMI->getTypeIDFor(GV);
3857      Res = DAG.getConstant(TypeID, MVT::i32);
3858    } else {
3859      // Return something different to eh_selector.
3860      Res = DAG.getConstant(1, MVT::i32);
3861    }
3862
3863    setValue(&I, Res);
3864    return 0;
3865  }
3866
3867  case Intrinsic::eh_return_i32:
3868  case Intrinsic::eh_return_i64:
3869    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3870      MMI->setCallsEHReturn(true);
3871      DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
3872                              MVT::Other,
3873                              getControlRoot(),
3874                              getValue(I.getOperand(1)),
3875                              getValue(I.getOperand(2))));
3876    } else {
3877      setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
3878    }
3879
3880    return 0;
3881  case Intrinsic::eh_unwind_init:
3882    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3883      MMI->setCallsUnwindInit(true);
3884    }
3885    return 0;
3886  case Intrinsic::eh_dwarf_cfa: {
3887    EVT VT = getValue(I.getOperand(1)).getValueType();
3888    SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
3889                                        TLI.getPointerTy());
3890    SDValue Offset = DAG.getNode(ISD::ADD, dl,
3891                                 TLI.getPointerTy(),
3892                                 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
3893                                             TLI.getPointerTy()),
3894                                 CfaArg);
3895    SDValue FA = DAG.getNode(ISD::FRAMEADDR, dl,
3896                             TLI.getPointerTy(),
3897                             DAG.getConstant(0, TLI.getPointerTy()));
3898    setValue(&I, DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
3899                             FA, Offset));
3900    return 0;
3901  }
3902  case Intrinsic::eh_sjlj_callsite: {
3903    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3904    ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
3905    assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
3906    assert(MMI->getCurrentCallSite() == 0 && "Overlapping call sites!");
3907
3908    MMI->setCurrentCallSite(CI->getZExtValue());
3909    return 0;
3910  }
3911
3912  case Intrinsic::convertff:
3913  case Intrinsic::convertfsi:
3914  case Intrinsic::convertfui:
3915  case Intrinsic::convertsif:
3916  case Intrinsic::convertuif:
3917  case Intrinsic::convertss:
3918  case Intrinsic::convertsu:
3919  case Intrinsic::convertus:
3920  case Intrinsic::convertuu: {
3921    ISD::CvtCode Code = ISD::CVT_INVALID;
3922    switch (Intrinsic) {
3923    case Intrinsic::convertff:  Code = ISD::CVT_FF; break;
3924    case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
3925    case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
3926    case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
3927    case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
3928    case Intrinsic::convertss:  Code = ISD::CVT_SS; break;
3929    case Intrinsic::convertsu:  Code = ISD::CVT_SU; break;
3930    case Intrinsic::convertus:  Code = ISD::CVT_US; break;
3931    case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
3932    }
3933    EVT DestVT = TLI.getValueType(I.getType());
3934    Value *Op1 = I.getOperand(1);
3935    Res = DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
3936                               DAG.getValueType(DestVT),
3937                               DAG.getValueType(getValue(Op1).getValueType()),
3938                               getValue(I.getOperand(2)),
3939                               getValue(I.getOperand(3)),
3940                               Code);
3941    setValue(&I, Res);
3942    return 0;
3943  }
3944  case Intrinsic::sqrt:
3945    setValue(&I, DAG.getNode(ISD::FSQRT, dl,
3946                             getValue(I.getOperand(1)).getValueType(),
3947                             getValue(I.getOperand(1))));
3948    return 0;
3949  case Intrinsic::powi:
3950    setValue(&I, ExpandPowI(dl, getValue(I.getOperand(1)),
3951                            getValue(I.getOperand(2)), DAG));
3952    return 0;
3953  case Intrinsic::sin:
3954    setValue(&I, DAG.getNode(ISD::FSIN, dl,
3955                             getValue(I.getOperand(1)).getValueType(),
3956                             getValue(I.getOperand(1))));
3957    return 0;
3958  case Intrinsic::cos:
3959    setValue(&I, DAG.getNode(ISD::FCOS, dl,
3960                             getValue(I.getOperand(1)).getValueType(),
3961                             getValue(I.getOperand(1))));
3962    return 0;
3963  case Intrinsic::log:
3964    visitLog(I);
3965    return 0;
3966  case Intrinsic::log2:
3967    visitLog2(I);
3968    return 0;
3969  case Intrinsic::log10:
3970    visitLog10(I);
3971    return 0;
3972  case Intrinsic::exp:
3973    visitExp(I);
3974    return 0;
3975  case Intrinsic::exp2:
3976    visitExp2(I);
3977    return 0;
3978  case Intrinsic::pow:
3979    visitPow(I);
3980    return 0;
3981  case Intrinsic::pcmarker: {
3982    SDValue Tmp = getValue(I.getOperand(1));
3983    DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
3984    return 0;
3985  }
3986  case Intrinsic::readcyclecounter: {
3987    SDValue Op = getRoot();
3988    Res = DAG.getNode(ISD::READCYCLECOUNTER, dl,
3989                      DAG.getVTList(MVT::i64, MVT::Other),
3990                      &Op, 1);
3991    setValue(&I, Res);
3992    DAG.setRoot(Res.getValue(1));
3993    return 0;
3994  }
3995  case Intrinsic::bswap:
3996    setValue(&I, DAG.getNode(ISD::BSWAP, dl,
3997                             getValue(I.getOperand(1)).getValueType(),
3998                             getValue(I.getOperand(1))));
3999    return 0;
4000  case Intrinsic::cttz: {
4001    SDValue Arg = getValue(I.getOperand(1));
4002    EVT Ty = Arg.getValueType();
4003    setValue(&I, DAG.getNode(ISD::CTTZ, dl, Ty, Arg));
4004    return 0;
4005  }
4006  case Intrinsic::ctlz: {
4007    SDValue Arg = getValue(I.getOperand(1));
4008    EVT Ty = Arg.getValueType();
4009    setValue(&I, DAG.getNode(ISD::CTLZ, dl, Ty, Arg));
4010    return 0;
4011  }
4012  case Intrinsic::ctpop: {
4013    SDValue Arg = getValue(I.getOperand(1));
4014    EVT Ty = Arg.getValueType();
4015    setValue(&I, DAG.getNode(ISD::CTPOP, dl, Ty, Arg));
4016    return 0;
4017  }
4018  case Intrinsic::stacksave: {
4019    SDValue Op = getRoot();
4020    Res = DAG.getNode(ISD::STACKSAVE, dl,
4021                      DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4022    setValue(&I, Res);
4023    DAG.setRoot(Res.getValue(1));
4024    return 0;
4025  }
4026  case Intrinsic::stackrestore: {
4027    Res = getValue(I.getOperand(1));
4028    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Res));
4029    return 0;
4030  }
4031  case Intrinsic::stackprotector: {
4032    // Emit code into the DAG to store the stack guard onto the stack.
4033    MachineFunction &MF = DAG.getMachineFunction();
4034    MachineFrameInfo *MFI = MF.getFrameInfo();
4035    EVT PtrTy = TLI.getPointerTy();
4036
4037    SDValue Src = getValue(I.getOperand(1));   // The guard's value.
4038    AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4039
4040    int FI = FuncInfo.StaticAllocaMap[Slot];
4041    MFI->setStackProtectorIndex(FI);
4042
4043    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4044
4045    // Store the stack protector onto the stack.
4046    Res = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4047                       PseudoSourceValue::getFixedStack(FI),
4048                       0, true);
4049    setValue(&I, Res);
4050    DAG.setRoot(Res);
4051    return 0;
4052  }
4053  case Intrinsic::objectsize: {
4054    // If we don't know by now, we're never going to know.
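    // Operand 2 selects which conservative answer to give: a zero value
    // (the "maximum size" query) lowers to -1, i.e. unknown, while a
    // non-zero value (the "minimum size" query) lowers to 0, matching the
    // constants chosen below.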
4055    ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
4056
4057    assert(CI && "Non-constant type in __builtin_object_size?");
4058
4059    SDValue Arg = getValue(I.getOperand(0));
4060    EVT Ty = Arg.getValueType();
4061
4062    if (CI->getZExtValue() == 0)
4063      Res = DAG.getConstant(-1ULL, Ty);
4064    else
4065      Res = DAG.getConstant(0, Ty);
4066
4067    setValue(&I, Res);
4068    return 0;
4069  }
4070  case Intrinsic::var_annotation:
4071    // Discard annotate attributes
4072    return 0;
4073
4074  case Intrinsic::init_trampoline: {
4075    const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4076
4077    SDValue Ops[6];
4078    Ops[0] = getRoot();
4079    Ops[1] = getValue(I.getOperand(1));
4080    Ops[2] = getValue(I.getOperand(2));
4081    Ops[3] = getValue(I.getOperand(3));
4082    Ops[4] = DAG.getSrcValue(I.getOperand(1));
4083    Ops[5] = DAG.getSrcValue(F);
4084
4085    Res = DAG.getNode(ISD::TRAMPOLINE, dl,
4086                      DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4087                      Ops, 6);
4088
4089    setValue(&I, Res);
4090    DAG.setRoot(Res.getValue(1));
4091    return 0;
4092  }
4093  case Intrinsic::gcroot:
4094    if (GFI) {
4095      Value *Alloca = I.getOperand(1);
4096      Constant *TypeMap = cast<Constant>(I.getOperand(2));
4097
4098      FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4099      GFI->addStackRoot(FI->getIndex(), TypeMap);
4100    }
4101    return 0;
4102  case Intrinsic::gcread:
4103  case Intrinsic::gcwrite:
4104    llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
4105    return 0;
4106  case Intrinsic::flt_rounds:
4107    setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
4108    return 0;
4109  case Intrinsic::trap:
4110    DAG.setRoot(DAG.getNode(ISD::TRAP, dl, MVT::Other, getRoot()));
4111    return 0;
4112  case Intrinsic::uadd_with_overflow:
4113    return implVisitAluOverflow(I, ISD::UADDO);
4114  case Intrinsic::sadd_with_overflow:
4115    return implVisitAluOverflow(I, ISD::SADDO);
4116  case Intrinsic::usub_with_overflow:
4117    return implVisitAluOverflow(I, ISD::USUBO);
4118  case Intrinsic::ssub_with_overflow:
4119    return implVisitAluOverflow(I, ISD::SSUBO);
4120  case Intrinsic::umul_with_overflow:
4121    return implVisitAluOverflow(I, ISD::UMULO);
4122  case Intrinsic::smul_with_overflow:
4123    return implVisitAluOverflow(I, ISD::SMULO);
4124
4125  case Intrinsic::prefetch: {
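    // Forward llvm.prefetch's operands -- the address plus its read/write
    // and locality hints -- after the chain.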
4126    SDValue Ops[4];
4127    Ops[0] = getRoot();
4128    Ops[1] = getValue(I.getOperand(1));
4129    Ops[2] = getValue(I.getOperand(2));
4130    Ops[3] = getValue(I.getOperand(3));
4131    DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4132    return 0;
4133  }
4134
4135  case Intrinsic::memory_barrier: {
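    // Forward the intrinsic's five i1 barrier flags (presumably load-load,
    // load-store, store-load, store-store and device) after the chain.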
4136    SDValue Ops[6];
4137    Ops[0] = getRoot();
4138    for (int x = 1; x < 6; ++x)
4139      Ops[x] = getValue(I.getOperand(x));
4140
4141    DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4142    return 0;
4143  }
4144  case Intrinsic::atomic_cmp_swap: {
4145    SDValue Root = getRoot();
4146    SDValue L =
4147      DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4148                    getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4149                    Root,
4150                    getValue(I.getOperand(1)),
4151                    getValue(I.getOperand(2)),
4152                    getValue(I.getOperand(3)),
4153                    I.getOperand(1));
4154    setValue(&I, L);
4155    DAG.setRoot(L.getValue(1));
4156    return 0;
4157  }
4158  case Intrinsic::atomic_load_add:
4159    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4160  case Intrinsic::atomic_load_sub:
4161    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4162  case Intrinsic::atomic_load_or:
4163    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4164  case Intrinsic::atomic_load_xor:
4165    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4166  case Intrinsic::atomic_load_and:
4167    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4168  case Intrinsic::atomic_load_nand:
4169    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4170  case Intrinsic::atomic_load_max:
4171    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4172  case Intrinsic::atomic_load_min:
4173    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4174  case Intrinsic::atomic_load_umin:
4175    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4176  case Intrinsic::atomic_load_umax:
4177    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4178  case Intrinsic::atomic_swap:
4179    return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4180
4181  case Intrinsic::invariant_start:
4182  case Intrinsic::lifetime_start:
4183    // Discard region information.
4184    setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
4185    return 0;
4186  case Intrinsic::invariant_end:
4187  case Intrinsic::lifetime_end:
4188    // Discard region information.
4189    return 0;
4190  }
4191}
4192
4193/// Test if the given instruction is in a position to be optimized
4194/// with a tail-call. This roughly means that it's in a block with
4195/// a return and there's nothing that needs to be scheduled
4196/// between it and the return.
4197///
4198/// This function only tests target-independent requirements.
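/// For illustration, a call is typically eligible when the IR looks roughly
/// like:
///   %r = call i32 @callee(i32 %x)
///   ret i32 %r
/// with nothing that reads or writes memory between the call and the return,
/// and with only no-op truncates or bitcasts applied to the returned value.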
4199static bool
4200isInTailCallPosition(const Instruction *I, Attributes CalleeRetAttr,
4201                     const TargetLowering &TLI) {
4202  const BasicBlock *ExitBB = I->getParent();
4203  const TerminatorInst *Term = ExitBB->getTerminator();
4204  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
4205  const Function *F = ExitBB->getParent();
4206
4207  // The block must end in a return statement or an unreachable.
4208  if (!Ret && !isa<UnreachableInst>(Term)) return false;
4209
4210  // If I will have a chain, make sure no other instruction that will have a
4211  // chain interposes between I and the return.
4212  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
4213      !I->isSafeToSpeculativelyExecute())
4214    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
4215         --BBI) {
4216      if (&*BBI == I)
4217        break;
4218      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
4219          !BBI->isSafeToSpeculativelyExecute())
4220        return false;
4221    }
4222
4223  // If the block ends with a void return or unreachable, it doesn't matter
4224  // what the call's return type is.
4225  if (!Ret || Ret->getNumOperands() == 0) return true;
4226
4227  // If the return value is undef, it doesn't matter what the call's
4228  // return type is.
4229  if (isa<UndefValue>(Ret->getOperand(0))) return true;
4230
4231  // Conservatively require the attributes of the call to match those of
4232  // the return. Ignore noalias because it doesn't affect the call sequence.
4233  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
4234  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
4235    return false;
4236
4237  // Otherwise, make sure the unmodified return value of I is the return value.
4238  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
4239       U = dyn_cast<Instruction>(U->getOperand(0))) {
4240    if (!U)
4241      return false;
4242    if (!U->hasOneUse())
4243      return false;
4244    if (U == I)
4245      break;
4246    // Check for a truly no-op truncate.
4247    if (isa<TruncInst>(U) &&
4248        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
4249      continue;
4250    // Check for a truly no-op bitcast.
4251    if (isa<BitCastInst>(U) &&
4252        (U->getOperand(0)->getType() == U->getType() ||
4253         (isa<PointerType>(U->getOperand(0)->getType()) &&
4254          isa<PointerType>(U->getType()))))
4255      continue;
4256    // Otherwise it's not a true no-op.
4257    return false;
4258  }
4259
4260  return true;
4261}
4262
4263void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
4264                                      bool isTailCall,
4265                                      MachineBasicBlock *LandingPad) {
4266  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4267  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4268  const Type *RetTy = FTy->getReturnType();
4269  MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4270  unsigned BeginLabel = 0, EndLabel = 0;
4271
4272  TargetLowering::ArgListTy Args;
4273  TargetLowering::ArgListEntry Entry;
4274  Args.reserve(CS.arg_size());
4275
4276  // Check whether the function can return without sret-demotion.
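  // If it cannot, the return value is demoted: a stack slot is created below,
  // its address is passed as a hidden sret argument, and the actual result is
  // reloaded from that slot once the call returns.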
4277  SmallVector<EVT, 4> OutVTs;
4278  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
4279  SmallVector<uint64_t, 4> Offsets;
4280  getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
4281                OutVTs, OutsFlags, TLI, &Offsets);
4282
4283  bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
4284                        FTy->isVarArg(), OutVTs, OutsFlags, DAG);
4285
4286  SDValue DemoteStackSlot;
4287
4288  if (!CanLowerReturn) {
4289    uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(
4290                      FTy->getReturnType());
4291    unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(
4292                      FTy->getReturnType());
4293    MachineFunction &MF = DAG.getMachineFunction();
4294    int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
4295    const Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
4296
4297    DemoteStackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
4298    Entry.Node = DemoteStackSlot;
4299    Entry.Ty = StackSlotPtrType;
4300    Entry.isSExt = false;
4301    Entry.isZExt = false;
4302    Entry.isInReg = false;
4303    Entry.isSRet = true;
4304    Entry.isNest = false;
4305    Entry.isByVal = false;
4306    Entry.Alignment = Align;
4307    Args.push_back(Entry);
4308    RetTy = Type::getVoidTy(FTy->getContext());
4309  }
4310
4311  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4312       i != e; ++i) {
4313    SDValue ArgNode = getValue(*i);
4314    Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4315
4316    unsigned attrInd = i - CS.arg_begin() + 1;
4317    Entry.isSExt  = CS.paramHasAttr(attrInd, Attribute::SExt);
4318    Entry.isZExt  = CS.paramHasAttr(attrInd, Attribute::ZExt);
4319    Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4320    Entry.isSRet  = CS.paramHasAttr(attrInd, Attribute::StructRet);
4321    Entry.isNest  = CS.paramHasAttr(attrInd, Attribute::Nest);
4322    Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4323    Entry.Alignment = CS.getParamAlignment(attrInd);
4324    Args.push_back(Entry);
4325  }
4326
4327  if (LandingPad && MMI) {
4328    // Insert a label before the invoke call to mark the try range.  This can be
4329    // used to detect deletion of the invoke via the MachineModuleInfo.
4330    BeginLabel = MMI->NextLabelID();
4331
4332    // For SjLj, keep track of which landing pads go with which invokes
4333    // so as to maintain the ordering of pads in the LSDA.
4334    unsigned CallSiteIndex = MMI->getCurrentCallSite();
4335    if (CallSiteIndex) {
4336      MMI->setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
4337      // Now that the call site is handled, stop tracking it.
4338      MMI->setCurrentCallSite(0);
4339    }
4340
4341    // Both PendingLoads and PendingExports must be flushed here;
4342    // this call might not return.
4343    (void)getRoot();
4344    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4345                             getControlRoot(), BeginLabel));
4346  }
4347
4348  // Check if target-independent constraints permit a tail call here.
4349  // Target-dependent constraints are checked within TLI.LowerCallTo.
4350  if (isTailCall &&
4351      !isInTailCallPosition(CS.getInstruction(),
4352                            CS.getAttributes().getRetAttributes(),
4353                            TLI))
4354    isTailCall = false;
4355
4356  std::pair<SDValue,SDValue> Result =
4357    TLI.LowerCallTo(getRoot(), RetTy,
4358                    CS.paramHasAttr(0, Attribute::SExt),
4359                    CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4360                    CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
4361                    CS.getCallingConv(),
4362                    isTailCall,
4363                    !CS.getInstruction()->use_empty(),
4364                    Callee, Args, DAG, getCurDebugLoc(), SDNodeOrder);
4365  assert((isTailCall || Result.second.getNode()) &&
4366         "Non-null chain expected with non-tail call!");
4367  assert((Result.second.getNode() || !Result.first.getNode()) &&
4368         "Null value expected with tail call!");
4369  if (Result.first.getNode()) {
4370    setValue(CS.getInstruction(), Result.first);
4371  } else if (!CanLowerReturn && Result.second.getNode()) {
4372    // The instruction result is the result of loading from the
4373    // hidden sret parameter.
4374    SmallVector<EVT, 1> PVTs;
4375    const Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
4376
4377    ComputeValueVTs(TLI, PtrRetTy, PVTs);
4378    assert(PVTs.size() == 1 && "Pointers should fit in one register");
4379    EVT PtrVT = PVTs[0];
4380    unsigned NumValues = OutVTs.size();
4381    SmallVector<SDValue, 4> Values(NumValues);
4382    SmallVector<SDValue, 4> Chains(NumValues);
4383
4384    for (unsigned i = 0; i < NumValues; ++i) {
4385      SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
4386                                DemoteStackSlot,
4387                                DAG.getConstant(Offsets[i], PtrVT));
4388      SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
4389                              Add, NULL, Offsets[i], false, 1);
4390      Values[i] = L;
4391      Chains[i] = L.getValue(1);
4392    }
4393
4394    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
4395                                MVT::Other, &Chains[0], NumValues);
4396    PendingLoads.push_back(Chain);
4397
4398    // Collect the legal value parts into potentially illegal values
4399    // that correspond to the original function's return values.
4400    SmallVector<EVT, 4> RetTys;
4401    RetTy = FTy->getReturnType();
4402    ComputeValueVTs(TLI, RetTy, RetTys);
4403    ISD::NodeType AssertOp = ISD::DELETED_NODE;
4404    SmallVector<SDValue, 4> ReturnValues;
4405    unsigned CurReg = 0;
4406    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
4407      EVT VT = RetTys[I];
4408      EVT RegisterVT = TLI.getRegisterType(RetTy->getContext(), VT);
4409      unsigned NumRegs = TLI.getNumRegisters(RetTy->getContext(), VT);
4410
4411      SDValue ReturnValue =
4412        getCopyFromParts(DAG, getCurDebugLoc(), SDNodeOrder, &Values[CurReg], NumRegs,
4413                         RegisterVT, VT, AssertOp);
4414      ReturnValues.push_back(ReturnValue);
4415      CurReg += NumRegs;
4416    }
4417
4418    setValue(CS.getInstruction(),
4419             DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
4420                         DAG.getVTList(&RetTys[0], RetTys.size()),
4421                         &ReturnValues[0], ReturnValues.size()));
4422
4423  }
4424
4425  // As a special case, a null chain means that a tail call has been emitted and
4426  // the DAG root is already updated.
4427  if (Result.second.getNode())
4428    DAG.setRoot(Result.second);
4429  else
4430    HasTailCall = true;
4431
4432  if (LandingPad && MMI) {
4433    // Insert a label at the end of the invoke call to mark the try range.  This
4434    // can be used to detect deletion of the invoke via the MachineModuleInfo.
4435    EndLabel = MMI->NextLabelID();
4436    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4437                             getRoot(), EndLabel));
4438
4439    // Inform MachineModuleInfo of range.
4440    MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4441  }
4442}
4443
4444/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
4445/// value is equal to or not equal to zero.
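/// For example, a memcmp result that is only ever consumed by
///   %c = icmp eq i32 %res, 0
/// qualifies, since callers only observe whether the value is zero.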
4446static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
4447  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
4448       UI != E; ++UI) {
4449    if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
4450      if (IC->isEquality())
4451        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
4452          if (C->isNullValue())
4453            continue;
4454    // Unknown instruction.
4455    return false;
4456  }
4457  return true;
4458}
4459
4460static SDValue getMemCmpLoad(Value *PtrVal, MVT LoadVT, const Type *LoadTy,
4461                             SelectionDAGBuilder &Builder) {
4462
4463  // Check to see if this load can be trivially constant folded, e.g. if the
4464  // input is from a string literal.
4465  if (Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
4466    // Cast pointer to the type we really want to load.
4467    LoadInput = ConstantExpr::getBitCast(LoadInput,
4468                                         PointerType::getUnqual(LoadTy));
4469
4470    if (Constant *LoadCst = ConstantFoldLoadFromConstPtr(LoadInput, Builder.TD))
4471      return Builder.getValue(LoadCst);
4472  }
4473
4474  // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
4475  // still constant memory, the input chain can be the entry node.
4476  SDValue Root;
4477  bool ConstantMemory = false;
4478
4479  // Do not serialize (non-volatile) loads of constant memory with anything.
4480  if (Builder.AA->pointsToConstantMemory(PtrVal)) {
4481    Root = Builder.DAG.getEntryNode();
4482    ConstantMemory = true;
4483  } else {
4484    // Do not serialize non-volatile loads against each other.
4485    Root = Builder.DAG.getRoot();
4486  }
4487
4488  SDValue Ptr = Builder.getValue(PtrVal);
4489  SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurDebugLoc(), Root,
4490                                        Ptr, PtrVal /*SrcValue*/, 0/*SVOffset*/,
4491                                        false /*volatile*/, 1 /* align=1 */);
4492
4493  if (!ConstantMemory)
4494    Builder.PendingLoads.push_back(LoadVal.getValue(1));
4495  return LoadVal;
4496}
4497
4498
4499/// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
4500/// If so, return true and lower it; otherwise return false and it will be
4501/// lowered like a normal call.
4502bool SelectionDAGBuilder::visitMemCmpCall(CallInst &I) {
4503  // Verify that the prototype makes sense.  int memcmp(void*,void*,size_t)
4504  if (I.getNumOperands() != 4)
4505    return false;
4506
4507  Value *LHS = I.getOperand(1), *RHS = I.getOperand(2);
4508  if (!isa<PointerType>(LHS->getType()) || !isa<PointerType>(RHS->getType()) ||
4509      !isa<IntegerType>(I.getOperand(3)->getType()) ||
4510      !isa<IntegerType>(I.getType()))
4511    return false;
4512
4513  ConstantInt *Size = dyn_cast<ConstantInt>(I.getOperand(3));
4514
4515  // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
4516  // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
4517  if (Size && IsOnlyUsedInZeroEqualityComparison(&I)) {
4518    bool ActuallyDoIt = true;
4519    MVT LoadVT;
4520    const Type *LoadTy;
4521    switch (Size->getZExtValue()) {
4522    default:
4523      LoadVT = MVT::Other;
4524      LoadTy = 0;
4525      ActuallyDoIt = false;
4526      break;
4527    case 2:
4528      LoadVT = MVT::i16;
4529      LoadTy = Type::getInt16Ty(Size->getContext());
4530      break;
4531    case 4:
4532      LoadVT = MVT::i32;
4533      LoadTy = Type::getInt32Ty(Size->getContext());
4534      break;
4535    case 8:
4536      LoadVT = MVT::i64;
4537      LoadTy = Type::getInt64Ty(Size->getContext());
4538      break;
4539        /*
4540    case 16:
4541      LoadVT = MVT::v4i32;
4542      LoadTy = Type::getInt32Ty(Size->getContext());
4543      LoadTy = VectorType::get(LoadTy, 4);
4544      break;
4545         */
4546    }
4547
4548    // This turns into unaligned loads.  We only do this if the target natively
4549    // supports the MVT we'll be loading or if it is small enough (<= 4) that
4550    // we'll only produce a small number of byte loads.
4551
4552    // Require that we can find a legal MVT, and only do this if the target
4553    // supports unaligned loads of that type.  Expanding into byte loads would
4554    // bloat the code.
4555    if (ActuallyDoIt && Size->getZExtValue() > 4) {
4556      // TODO: Handle 5 byte compare as 4-byte + 1 byte.
4557      // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
4558      if (!TLI.isTypeLegal(LoadVT) ||!TLI.allowsUnalignedMemoryAccesses(LoadVT))
4559        ActuallyDoIt = false;
4560    }
4561
4562    if (ActuallyDoIt) {
4563      SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this);
4564      SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this);
4565
4566      SDValue Res = DAG.getSetCC(getCurDebugLoc(), MVT::i1, LHSVal, RHSVal,
4567                                 ISD::SETNE);
4568      EVT CallVT = TLI.getValueType(I.getType(), true);
4569      setValue(&I, DAG.getZExtOrTrunc(Res, getCurDebugLoc(), CallVT));
4570      return true;
4571    }
4572  }
4573
4574
4575  return false;
4576}
4577
4578
4579void SelectionDAGBuilder::visitCall(CallInst &I) {
4580  const char *RenameFn = 0;
4581  if (Function *F = I.getCalledFunction()) {
4582    if (F->isDeclaration()) {
4583      const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
4584      if (II) {
4585        if (unsigned IID = II->getIntrinsicID(F)) {
4586          RenameFn = visitIntrinsicCall(I, IID);
4587          if (!RenameFn)
4588            return;
4589        }
4590      }
4591      if (unsigned IID = F->getIntrinsicID()) {
4592        RenameFn = visitIntrinsicCall(I, IID);
4593        if (!RenameFn)
4594          return;
4595      }
4596    }
4597
4598    // Check for well-known libc/libm calls.  If the function is internal, it
4599    // can't be a library call.
4600    if (!F->hasLocalLinkage() && F->hasName()) {
4601      StringRef Name = F->getName();
4602      if (Name == "copysign" || Name == "copysignf") {
4603        if (I.getNumOperands() == 3 &&   // Basic sanity checks.
4604            I.getOperand(1)->getType()->isFloatingPoint() &&
4605            I.getType() == I.getOperand(1)->getType() &&
4606            I.getType() == I.getOperand(2)->getType()) {
4607          SDValue LHS = getValue(I.getOperand(1));
4608          SDValue RHS = getValue(I.getOperand(2));
4609          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4610                                   LHS.getValueType(), LHS, RHS));
4611          return;
4612        }
4613      } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
4614        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4615            I.getOperand(1)->getType()->isFloatingPoint() &&
4616            I.getType() == I.getOperand(1)->getType()) {
4617          SDValue Tmp = getValue(I.getOperand(1));
4618          setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4619                                   Tmp.getValueType(), Tmp));
4620          return;
4621        }
4622      } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
4623        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4624            I.getOperand(1)->getType()->isFloatingPoint() &&
4625            I.getType() == I.getOperand(1)->getType() &&
4626            I.onlyReadsMemory()) {
4627          SDValue Tmp = getValue(I.getOperand(1));
4628          setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4629                                   Tmp.getValueType(), Tmp));
4630          return;
4631        }
4632      } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
4633        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4634            I.getOperand(1)->getType()->isFloatingPoint() &&
4635            I.getType() == I.getOperand(1)->getType() &&
4636            I.onlyReadsMemory()) {
4637          SDValue Tmp = getValue(I.getOperand(1));
4638          setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4639                                   Tmp.getValueType(), Tmp));
4640          return;
4641        }
4642      } else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
4643        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4644            I.getOperand(1)->getType()->isFloatingPoint() &&
4645            I.getType() == I.getOperand(1)->getType() &&
4646            I.onlyReadsMemory()) {
4647          SDValue Tmp = getValue(I.getOperand(1));
4648          setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
4649                                   Tmp.getValueType(), Tmp));
4650          return;
4651        }
4652      } else if (Name == "memcmp") {
4653        if (visitMemCmpCall(I))
4654          return;
4655      }
4656    }
4657  } else if (isa<InlineAsm>(I.getOperand(0))) {
4658    visitInlineAsm(&I);
4659    return;
4660  }
4661
4662  SDValue Callee;
4663  if (!RenameFn)
4664    Callee = getValue(I.getOperand(0));
4665  else
4666    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4667
4668  // Check if we can potentially perform a tail call. More detailed checking is
4669  // done within LowerCallTo, after more information about the call is known.
4670  LowerCallTo(&I, Callee, I.isTailCall());
4671}
4672
4673/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
4674/// this value and returns the result as a ValueVT value.  This uses
4675/// Chain/Flag as the input and updates them for the output Chain/Flag.
4676/// If the Flag pointer is NULL, no flag is used.
4677SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4678                                      unsigned Order, SDValue &Chain,
4679                                      SDValue *Flag) const {
4680  // Assemble the legal parts into the final values.
4681  SmallVector<SDValue, 4> Values(ValueVTs.size());
4682  SmallVector<SDValue, 8> Parts;
4683  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4684    // Copy the legal parts from the registers.
4685    EVT ValueVT = ValueVTs[Value];
4686    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4687    EVT RegisterVT = RegVTs[Value];
4688
4689    Parts.resize(NumRegs);
4690    for (unsigned i = 0; i != NumRegs; ++i) {
4691      SDValue P;
4692      if (Flag == 0) {
4693        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
4694      } else {
4695        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
4696        *Flag = P.getValue(2);
4697      }
4698
4699      Chain = P.getValue(1);
4700
4701      // If the source register was virtual and if we know something about it,
4702      // add an assert node.
4703      if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4704          RegisterVT.isInteger() && !RegisterVT.isVector()) {
4705        unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4706        FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4707        if (FLI.LiveOutRegInfo.size() > SlotNo) {
4708          FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4709
4710          unsigned RegSize = RegisterVT.getSizeInBits();
4711          unsigned NumSignBits = LOI.NumSignBits;
4712          unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4713
4714          // FIXME: We capture more information than the dag can represent.  For
4715          // now, just use the tightest assertzext/assertsext possible.
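          // E.g. an i32 virtual register whose upper 24 bits are known to be
          // zero can be tagged with an i8 zero-extension assertion here.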
4716          bool isSExt = true;
4717          EVT FromVT(MVT::Other);
4718          if (NumSignBits == RegSize)
4719            isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
4720          else if (NumZeroBits >= RegSize-1)
4721            isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
4722          else if (NumSignBits > RegSize-8)
4723            isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
4724          else if (NumZeroBits >= RegSize-8)
4725            isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
4726          else if (NumSignBits > RegSize-16)
4727            isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
4728          else if (NumZeroBits >= RegSize-16)
4729            isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4730          else if (NumSignBits > RegSize-32)
4731            isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
4732          else if (NumZeroBits >= RegSize-32)
4733            isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
4734
4735          if (FromVT != MVT::Other)
4736            P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4737                            RegisterVT, P, DAG.getValueType(FromVT));
4738        }
4739      }
4740
4741      Parts[i] = P;
4742    }
4743
4744    Values[Value] = getCopyFromParts(DAG, dl, Order, Parts.begin(),
4745                                     NumRegs, RegisterVT, ValueVT);
4746    Part += NumRegs;
4747    Parts.clear();
4748  }
4749
4750  return DAG.getNode(ISD::MERGE_VALUES, dl,
4751                     DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4752                     &Values[0], ValueVTs.size());
4753}
4754
4755/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
4756/// specified value into the registers specified by this object.  This uses
4757/// Chain/Flag as the input and updates them for the output Chain/Flag.
4758/// If the Flag pointer is NULL, no flag is used.
4759void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4760                                 unsigned Order, SDValue &Chain,
4761                                 SDValue *Flag) const {
4762  // Get the list of the value's legal parts.
4763  unsigned NumRegs = Regs.size();
4764  SmallVector<SDValue, 8> Parts(NumRegs);
4765  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4766    EVT ValueVT = ValueVTs[Value];
4767    unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4768    EVT RegisterVT = RegVTs[Value];
4769
4770    getCopyToParts(DAG, dl, Order,
4771                   Val.getValue(Val.getResNo() + Value),
4772                   &Parts[Part], NumParts, RegisterVT);
4773    Part += NumParts;
4774  }
4775
4776  // Copy the parts into the registers.
4777  SmallVector<SDValue, 8> Chains(NumRegs);
4778  for (unsigned i = 0; i != NumRegs; ++i) {
4779    SDValue Part;
4780    if (Flag == 0) {
4781      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
4782    } else {
4783      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
4784      *Flag = Part.getValue(1);
4785    }
4786
4787    Chains[i] = Part.getValue(0);
4788  }
4789
4790  if (NumRegs == 1 || Flag)
4791    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
4792    // flagged to it. That is the CopyToReg nodes and the user are considered
4793    // a single scheduling unit. If we create a TokenFactor and return it as
4794    // chain, then the TokenFactor is both a predecessor (operand) of the
4795    // user as well as a successor (the TF operands are flagged to the user).
4796    // c1, f1 = CopyToReg
4797    // c2, f2 = CopyToReg
4798    // c3     = TokenFactor c1, c2
4799    // ...
4800    //        = op c3, ..., f2
4801    Chain = Chains[NumRegs-1];
4802  else
4803    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4804}
4805
4806/// AddInlineAsmOperands - Add this value to the specified inlineasm node
4807/// operand list.  This adds the code marker and includes the number of
4808/// values added into it.
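/// As encoded below, the flag word packs the operand kind code into bits
/// [2:0] and the register count into bits [15:3]; when a matching operand is
/// present, bit 31 is set and the matching index occupies bits [30:16].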
4809void RegsForValue::AddInlineAsmOperands(unsigned Code,
4810                                        bool HasMatching,unsigned MatchingIdx,
4811                                        SelectionDAG &DAG, unsigned Order,
4812                                        std::vector<SDValue> &Ops) const {
4813  assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
4814  unsigned Flag = Code | (Regs.size() << 3);
4815  if (HasMatching)
4816    Flag |= 0x80000000 | (MatchingIdx << 16);
4817  SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
4818  Ops.push_back(Res);
4819
4820  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4821    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
4822    EVT RegisterVT = RegVTs[Value];
4823    for (unsigned i = 0; i != NumRegs; ++i) {
4824      assert(Reg < Regs.size() && "Mismatch in # registers expected");
4825      Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4826    }
4827  }
4828}
4829
4830/// isAllocatableRegister - If the specified register is safe to allocate,
4831/// i.e. it isn't a stack pointer or some other special register, return the
4832/// register class for the register.  Otherwise, return null.
4833static const TargetRegisterClass *
4834isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4835                      const TargetLowering &TLI,
4836                      const TargetRegisterInfo *TRI) {
4837  EVT FoundVT = MVT::Other;
4838  const TargetRegisterClass *FoundRC = 0;
4839  for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4840       E = TRI->regclass_end(); RCI != E; ++RCI) {
4841    EVT ThisVT = MVT::Other;
4842
4843    const TargetRegisterClass *RC = *RCI;
4844    // If none of the value types for this register class are valid, we
4845    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
4846    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4847         I != E; ++I) {
4848      if (TLI.isTypeLegal(*I)) {
4849        // If we have already found this register in a different register class,
4850        // choose the one with the largest VT specified.  For example, on
4851        // PowerPC, we favor f64 register classes over f32.
4852        if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4853          ThisVT = *I;
4854          break;
4855        }
4856      }
4857    }
4858
4859    if (ThisVT == MVT::Other) continue;
4860
4861    // NOTE: This isn't ideal.  In particular, this might allocate the
4862    // frame pointer in functions that need it (due to them not being taken
4863    // out of allocation, because a variable sized allocation hasn't been seen
4864    // yet).  This is a slight code pessimization, but should still work.
4865    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4866         E = RC->allocation_order_end(MF); I != E; ++I)
4867      if (*I == Reg) {
4868        // We found a matching register class.  Keep looking at others in case
4869        // we find one with larger registers that this physreg is also in.
4870        FoundRC = RC;
4871        FoundVT = ThisVT;
4872        break;
4873      }
4874  }
4875  return FoundRC;
4876}
4877
4878
4879namespace llvm {
4880/// AsmOperandInfo - This contains information for each constraint that we are
4881/// lowering.
4882class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4883    public TargetLowering::AsmOperandInfo {
4884public:
4885  /// CallOperand - If this is the result output operand or a clobber
4886  /// this is null, otherwise it is the incoming operand to the CallInst.
4887  /// This gets modified as the asm is processed.
4888  SDValue CallOperand;
4889
4890  /// AssignedRegs - If this is a register or register class operand, this
4891  /// contains the set of registers corresponding to the operand.
4892  RegsForValue AssignedRegs;
4893
4894  explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4895    : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4896  }
4897
4898  /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4899  /// busy in OutputRegs/InputRegs.
4900  void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4901                         std::set<unsigned> &OutputRegs,
4902                         std::set<unsigned> &InputRegs,
4903                         const TargetRegisterInfo &TRI) const {
4904    if (isOutReg) {
4905      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4906        MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4907    }
4908    if (isInReg) {
4909      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4910        MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4911    }
4912  }
4913
4914  /// getCallOperandValEVT - Return the EVT of the Value* that this operand
4915  /// corresponds to.  If there is no Value* for this operand, it returns
4916  /// MVT::Other.
4917  EVT getCallOperandValEVT(LLVMContext &Context,
4918                           const TargetLowering &TLI,
4919                           const TargetData *TD) const {
4920    if (CallOperandVal == 0) return MVT::Other;
4921
4922    if (isa<BasicBlock>(CallOperandVal))
4923      return TLI.getPointerTy();
4924
4925    const llvm::Type *OpTy = CallOperandVal->getType();
4926
4927    // If this is an indirect operand, the operand is a pointer to the
4928    // accessed type.
4929    if (isIndirect) {
4930      const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
4931      if (!PtrTy)
4932        llvm_report_error("Indirect operand for inline asm not a pointer!");
4933      OpTy = PtrTy->getElementType();
4934    }
4935
4936    // If OpTy is not a single value, it may be a struct/union that we
4937    // can tile with integers.
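    // For example, a 32-bit aggregate such as { i16, i16 } is handed to the
    // constraint matching code below as a single i32.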
4938    if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4939      unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4940      switch (BitSize) {
4941      default: break;
4942      case 1:
4943      case 8:
4944      case 16:
4945      case 32:
4946      case 64:
4947      case 128:
4948        OpTy = IntegerType::get(Context, BitSize);
4949        break;
4950      }
4951    }
4952
4953    return TLI.getValueType(OpTy, true);
4954  }
4955
4956private:
4957  /// MarkRegAndAliases - Mark the specified register and all aliases in the
4958  /// specified set.
4959  static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4960                                const TargetRegisterInfo &TRI) {
4961    assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4962    Regs.insert(Reg);
4963    if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4964      for (; *Aliases; ++Aliases)
4965        Regs.insert(*Aliases);
4966  }
4967};
4968} // end llvm namespace.
4969
4970
4971/// GetRegistersForValue - Assign registers (virtual or physical) for the
4972/// specified operand.  We prefer to assign virtual registers, to allow the
4973/// register allocator to handle the assignment process.  However, if the asm
4974/// uses features that we can't model on machineinstrs, we have SDISel do the
4975/// allocation.  This produces generally horrible, but correct, code.
4976///
4977///   OpInfo describes the operand.
4978///   Input and OutputRegs are the set of already allocated physical registers.
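/// For example (illustrative constraints), an output written as "={r17}" is
/// pinned to that physical register and allocated here, whereas a plain "r"
/// constraint creates virtual registers and leaves the choice to the
/// register allocator.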
4979///
4980void SelectionDAGBuilder::
4981GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4982                     std::set<unsigned> &OutputRegs,
4983                     std::set<unsigned> &InputRegs) {
4984  LLVMContext &Context = FuncInfo.Fn->getContext();
4985
4986  // Compute whether this value requires an input register, an output register,
4987  // or both.
4988  bool isOutReg = false;
4989  bool isInReg = false;
4990  switch (OpInfo.Type) {
4991  case InlineAsm::isOutput:
4992    isOutReg = true;
4993
4994    // If there is an input constraint that matches this, we need to reserve
4995    // the input register so no other inputs allocate to it.
4996    isInReg = OpInfo.hasMatchingInput();
4997    break;
4998  case InlineAsm::isInput:
4999    isInReg = true;
5000    isOutReg = false;
5001    break;
5002  case InlineAsm::isClobber:
5003    isOutReg = true;
5004    isInReg = true;
5005    break;
5006  }
5007
5008
5009  MachineFunction &MF = DAG.getMachineFunction();
5010  SmallVector<unsigned, 4> Regs;
5011
5012  // If this is a constraint for a single physreg, or a constraint for a
5013  // register class, find it.
5014  std::pair<unsigned, const TargetRegisterClass*> PhysReg =
5015    TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
5016                                     OpInfo.ConstraintVT);
5017
5018  unsigned NumRegs = 1;
5019  if (OpInfo.ConstraintVT != MVT::Other) {
5020    // If this is an FP input in an integer register (or vice versa) insert a bit
5021    // cast of the input value.  More generally, handle any case where the input
5022    // value disagrees with the register class we plan to stick this in.
5023    if (OpInfo.Type == InlineAsm::isInput &&
5024        PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
5025      // Try to convert to the first EVT that the reg class contains.  If the
5026      // types are identical size, use a bitcast to convert (e.g. two differing
5027      // vector types).
5028      EVT RegVT = *PhysReg.second->vt_begin();
5029      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
5030        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5031                                         RegVT, OpInfo.CallOperand);
5032        OpInfo.ConstraintVT = RegVT;
5033      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
5034        // If the input is a FP value and we want it in FP registers, do a
5035        // bitcast to the corresponding integer type.  This turns an f64 value
5036        // into i64, which can be passed with two i32 values on a 32-bit
5037        // machine.
5038        RegVT = EVT::getIntegerVT(Context,
5039                                  OpInfo.ConstraintVT.getSizeInBits());
5040        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5041                                         RegVT, OpInfo.CallOperand);
5042        OpInfo.ConstraintVT = RegVT;
5043      }
5044    }
5045
5046    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
5047  }
5048
5049  EVT RegVT;
5050  EVT ValueVT = OpInfo.ConstraintVT;
5051
5052  // If this is a constraint for a specific physical register, like {r17},
5053  // assign it now.
5054  if (unsigned AssignedReg = PhysReg.first) {
5055    const TargetRegisterClass *RC = PhysReg.second;
5056    if (OpInfo.ConstraintVT == MVT::Other)
5057      ValueVT = *RC->vt_begin();
5058
5059    // Get the actual register value type.  This is important, because the user
5060    // may have asked for (e.g.) the AX register in i32 type.  We need to
5061    // remember that AX is actually i16 to get the right extension.
5062    RegVT = *RC->vt_begin();
5063
5064    // This is an explicit reference to a physical register.
5065    Regs.push_back(AssignedReg);
5066
5067    // If this is an expanded reference, add the rest of the regs to Regs.
5068    if (NumRegs != 1) {
5069      TargetRegisterClass::iterator I = RC->begin();
5070      for (; *I != AssignedReg; ++I)
5071        assert(I != RC->end() && "Didn't find reg!");
5072
5073      // Already added the first reg.
5074      --NumRegs; ++I;
5075      for (; NumRegs; --NumRegs, ++I) {
5076        assert(I != RC->end() && "Ran out of registers to allocate!");
5077        Regs.push_back(*I);
5078      }
5079    }
5080
5081    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5082    const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5083    OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5084    return;
5085  }
5086
5087  // Otherwise, if this was a reference to an LLVM register class, create vregs
5088  // for this reference.
5089  if (const TargetRegisterClass *RC = PhysReg.second) {
5090    RegVT = *RC->vt_begin();
5091    if (OpInfo.ConstraintVT == MVT::Other)
5092      ValueVT = RegVT;
5093
5094    // Create the appropriate number of virtual registers.
5095    MachineRegisterInfo &RegInfo = MF.getRegInfo();
5096    for (; NumRegs; --NumRegs)
5097      Regs.push_back(RegInfo.createVirtualRegister(RC));
5098
5099    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5100    return;
5101  }
5102
5103  // This is a reference to a register class that doesn't directly correspond
5104  // to an LLVM register class.  Allocate NumRegs consecutive, available,
5105  // registers from the class.
5106  std::vector<unsigned> RegClassRegs
5107    = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
5108                                            OpInfo.ConstraintVT);
5109
5110  const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5111  unsigned NumAllocated = 0;
5112  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
5113    unsigned Reg = RegClassRegs[i];
5114    // See if this register is available.
5115    if ((isOutReg && OutputRegs.count(Reg)) ||   // Already used.
5116        (isInReg  && InputRegs.count(Reg))) {    // Already used.
5117      // Make sure we find consecutive registers.
5118      NumAllocated = 0;
5119      continue;
5120    }
5121
5122    // Check to see if this register is allocatable (i.e. don't give out the
5123    // stack pointer).
5124    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
5125    if (!RC) {        // Couldn't allocate this register.
5126      // Reset NumAllocated to make sure we return consecutive registers.
5127      NumAllocated = 0;
5128      continue;
5129    }
5130
5131    // Okay, this register is good, we can use it.
5132    ++NumAllocated;
5133
5134    // If we allocated enough consecutive registers, succeed.
5135    if (NumAllocated == NumRegs) {
5136      unsigned RegStart = (i-NumAllocated)+1;
5137      unsigned RegEnd   = i+1;
5138      // Mark all of the allocated registers used.
5139      for (unsigned i = RegStart; i != RegEnd; ++i)
5140        Regs.push_back(RegClassRegs[i]);
5141
5142      OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
5143                                         OpInfo.ConstraintVT);
5144      OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5145      return;
5146    }
5147  }
5148
5149  // Otherwise, we couldn't allocate enough registers for this.
5150}
5151
5152/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
5153/// processed uses a memory 'm' constraint.
5154static bool
5155hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
5156                          const TargetLowering &TLI) {
5157  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
5158    InlineAsm::ConstraintInfo &CI = CInfos[i];
5159    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
5160      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
5161      if (CType == TargetLowering::C_Memory)
5162        return true;
5163    }
5164
5165    // Indirect operand accesses access memory.
5166    if (CI.isIndirect)
5167      return true;
5168  }
5169
5170  return false;
5171}
5172
5173/// visitInlineAsm - Handle a call to an InlineAsm object.
5174///
5175void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
5176  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
5177
5178  /// ConstraintOperands - Information about all of the constraints.
5179  std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5180
5181  std::set<unsigned> OutputRegs, InputRegs;
5182
5183  // Do a prepass over the constraints, canonicalizing them, and building up the
5184  // ConstraintOperands list.
5185  std::vector<InlineAsm::ConstraintInfo>
5186    ConstraintInfos = IA->ParseConstraints();
5187
5188  bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5189
5190  SDValue Chain, Flag;
5191
5192  // We won't need to flush pending loads if this asm doesn't touch
5193  // memory and is nonvolatile.
5194  if (hasMemory || IA->hasSideEffects())
5195    Chain = getRoot();
5196  else
5197    Chain = DAG.getRoot();
5198
5199  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
5200  unsigned ResNo = 0;   // ResNo - The result number of the next output.
5201  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5202    ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5203    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5204
5205    EVT OpVT = MVT::Other;
5206
5207    // Compute the value type for each operand.
5208    switch (OpInfo.Type) {
5209    case InlineAsm::isOutput:
5210      // Indirect outputs just consume an argument.
5211      if (OpInfo.isIndirect) {
5212        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5213        break;
5214      }
5215
5216      // The return value of the call is this value.  As such, there is no
5217      // corresponding argument.
5218      assert(!CS.getType()->isVoidTy() &&
5219             "Bad inline asm!");
5220      if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5221        OpVT = TLI.getValueType(STy->getElementType(ResNo));
5222      } else {
5223        assert(ResNo == 0 && "Asm only has one result!");
5224        OpVT = TLI.getValueType(CS.getType());
5225      }
5226      ++ResNo;
5227      break;
5228    case InlineAsm::isInput:
5229      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5230      break;
5231    case InlineAsm::isClobber:
5232      // Nothing to do.
5233      break;
5234    }
5235
5236    // If this is an input or an indirect output, process the call argument.
5237    // BasicBlocks are labels, currently appearing only in asm's.
5238    if (OpInfo.CallOperandVal) {
5239      // Strip bitcasts, if any.  This mostly comes up for functions.
5240      OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
5241
5242      if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5243        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5244      } else {
5245        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5246      }
5247
5248      OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
5249    }
5250
5251    OpInfo.ConstraintVT = OpVT;
5252  }
5253
5254  // Second pass over the constraints: compute which constraint option to use
5255  // and assign registers to constraints that want a specific physreg.
5256  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5257    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5258
5259    // If this is an output operand with a matching input operand, look up the
5260    // matching input. If their types mismatch, e.g. one is an integer, the
5261    // other is floating point, or their sizes are different, flag it as an
5262    // error.
5263    if (OpInfo.hasMatchingInput()) {
5264      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5265      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5266        if ((OpInfo.ConstraintVT.isInteger() !=
5267             Input.ConstraintVT.isInteger()) ||
5268            (OpInfo.ConstraintVT.getSizeInBits() !=
5269             Input.ConstraintVT.getSizeInBits())) {
5270          llvm_report_error("Unsupported asm: input constraint"
5271                            " with a matching output constraint of incompatible"
5272                            " type!");
5273        }
5274        Input.ConstraintVT = OpInfo.ConstraintVT;
5275      }
5276    }
5277
5278    // Compute the constraint code and ConstraintType to use.
5279    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5280
5281    // If this is a memory input, and if the operand is not indirect, do what we
5282    // need to do to provide an address for the memory input.
5283    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5284        !OpInfo.isIndirect) {
5285      assert(OpInfo.Type == InlineAsm::isInput &&
5286             "Can only indirectify direct input operands!");
5287
5288      // Memory operands really want the address of the value.  If we don't have
5289      // an indirect input, put it in the constant pool if we can, otherwise spill
5290      // it to a stack slot.
5291
5292      // If the operand is a float, integer, or vector constant, spill to a
5293      // constant pool entry to get its address.
5294      Value *OpVal = OpInfo.CallOperandVal;
5295      if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5296          isa<ConstantVector>(OpVal)) {
5297        OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5298                                                 TLI.getPointerTy());
5299      } else {
5300        // Otherwise, create a stack slot and emit a store to it before the
5301        // asm.
5302        const Type *Ty = OpVal->getType();
5303        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5304        unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5305        MachineFunction &MF = DAG.getMachineFunction();
5306        int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
5307        SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5308        Chain = DAG.getStore(Chain, getCurDebugLoc(),
5309                             OpInfo.CallOperand, StackSlot, NULL, 0);
5310        OpInfo.CallOperand = StackSlot;
5311      }
5312
5313      // There is no longer a Value* corresponding to this operand.
5314      OpInfo.CallOperandVal = 0;
5315
5316      // It is now an indirect operand.
5317      OpInfo.isIndirect = true;
5318    }
5319
5320    // If this constraint is for a specific register, allocate it before
5321    // anything else.
5322    if (OpInfo.ConstraintType == TargetLowering::C_Register)
5323      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5324  }
5325
5326  ConstraintInfos.clear();
5327
5328  // Third pass - Loop over all of the operands, assigning virtual or physregs
5329  // to register class operands.
5330  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5331    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5332
5333    // C_Register operands have already been allocated; Other/Memory operands
5334    // don't need to be.
5335    if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5336      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5337  }
5338
5339  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5340  std::vector<SDValue> AsmNodeOperands;
5341  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
5342  AsmNodeOperands.push_back(
5343          DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
5344                                      TLI.getPointerTy()));
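  // Each operand group added below is preceded by a flag word: the low 3 bits
  // encode the operand kind (2 = REGDEF, 6 = EARLYCLOBBER REGDEF, 1 = REGUSE,
  // 3 = IMM, 4 = MEM, as the constants used below show), and the next bits
  // hold the number of register/value operands that follow the flag word.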
5345
5346
5347  // Loop over all of the inputs, copying the operand values into the
5348  // appropriate registers and processing the output regs.
5349  RegsForValue RetValRegs;
5350
5351  // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5352  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5353
5354  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5355    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5356
5357    switch (OpInfo.Type) {
5358    case InlineAsm::isOutput: {
5359      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5360          OpInfo.ConstraintType != TargetLowering::C_Register) {
5361        // Memory output, or 'other' output (e.g. 'X' constraint).
5362        assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5363
5364        // Add information to the INLINEASM node to know about this output.
5365        unsigned ResOpType = 4/*MEM*/ | (1<<3);
5366        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5367                                                        TLI.getPointerTy()));
5368        AsmNodeOperands.push_back(OpInfo.CallOperand);
5369        break;
5370      }
5371
5372      // Otherwise, this is a register or register class output.
5373
5374      // Copy the output from the appropriate register.  Find a register that
5375      // we can use.
5376      if (OpInfo.AssignedRegs.Regs.empty()) {
5377        llvm_report_error("Couldn't allocate output reg for"
5378                          " constraint '" + OpInfo.ConstraintCode + "'!");
5379      }
5380
5381      // If this is an indirect operand, store through the pointer after the
5382      // asm.
5383      if (OpInfo.isIndirect) {
5384        IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5385                                                      OpInfo.CallOperandVal));
5386      } else {
5387        // This is the result value of the call.
5388        assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
5389        // Concatenate this output onto the outputs list.
5390        RetValRegs.append(OpInfo.AssignedRegs);
5391      }
5392
5393      // Add information to the INLINEASM node to know that this register is
5394      // set.
5395      OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5396                                               6 /* EARLYCLOBBER REGDEF */ :
5397                                               2 /* REGDEF */ ,
5398                                               false,
5399                                               0,
5400                                               DAG, SDNodeOrder,
5401                                               AsmNodeOperands);
5402      break;
5403    }
5404    case InlineAsm::isInput: {
5405      SDValue InOperandVal = OpInfo.CallOperand;
5406
5407      if (OpInfo.isMatchingInputConstraint()) {   // Matching constraint?
5408        // If this is required to match an output register we have already set,
5409        // just use its register.
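        // For example, for "call i32 asm "mov $0, $1", "=r,0"(i32 %x)" the
        // input %x is tied to output operand 0 and must be assigned the same
        // register as that output.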
5410        unsigned OperandNo = OpInfo.getMatchedOperand();
5411
5412        // Scan until we find the definition of this operand that we already
5413        // emitted.  When we find it, create a RegsForValue operand.
5414        unsigned CurOp = 2;  // The first operand; 0 is the chain, 1 the asm string.
5415        for (; OperandNo; --OperandNo) {
5416          // Advance to the next operand.
5417          unsigned OpFlag =
5418            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5419          assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5420                  (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5421                  (OpFlag & 7) == 4 /*MEM*/) &&
5422                 "Skipped past definitions?");
5423          CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
5424        }
5425
5426        unsigned OpFlag =
5427          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5428        if ((OpFlag & 7) == 2 /*REGDEF*/
5429            || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5430          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5431          if (OpInfo.isIndirect) {
5432            llvm_report_error("Don't know how to handle tied indirect "
5433                              "register inputs yet!");
5434          }
5435          RegsForValue MatchedRegs;
5436          MatchedRegs.TLI = &TLI;
5437          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5438          EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5439          MatchedRegs.RegVTs.push_back(RegVT);
5440          MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
5441          for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
5442               i != e; ++i)
5443            MatchedRegs.Regs.push_back
5444              (RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
5445
5446          // Use the produced MatchedRegs object to copy the input value into them.
5447          MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5448                                    SDNodeOrder, Chain, &Flag);
5449          MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5450                                           true, OpInfo.getMatchedOperand(),
5451                                           DAG, SDNodeOrder, AsmNodeOperands);
5452          break;
5453        } else {
5454          assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5455          assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5456                 "Unexpected number of operands");
5457          // Add information to the INLINEASM node to know about this input.
5458          // See InlineAsm.h isUseOperandTiedToDef.
5459          OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5460          AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5461                                                          TLI.getPointerTy()));
5462          AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5463          break;
5464        }
5465      }
5466
5467      if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5468        assert(!OpInfo.isIndirect &&
5469               "Don't know how to handle indirect other inputs yet!");
5470
5471        std::vector<SDValue> Ops;
5472        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
5473                                         hasMemory, Ops, DAG);
5474        if (Ops.empty()) {
5475          llvm_report_error("Invalid operand for inline asm"
5476                            " constraint '" + OpInfo.ConstraintCode + "'!");
5477        }
5478
5479        // Add information to the INLINEASM node to know about this input.
5480        unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
5481        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5482                                                        TLI.getPointerTy()));
5483        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
5484        break;
5485      } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5486        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5487        assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5488               "Memory operands expect pointer values");
5489
5490        // Add information to the INLINEASM node to know about this input.
5491        unsigned ResOpType = 4/*MEM*/ | (1<<3);
5492        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5493                                                        TLI.getPointerTy()));
5494        AsmNodeOperands.push_back(InOperandVal);
5495        break;
5496      }
5497
5498      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5499              OpInfo.ConstraintType == TargetLowering::C_Register) &&
5500             "Unknown constraint type!");
5501      assert(!OpInfo.isIndirect &&
5502             "Don't know how to handle indirect register inputs yet!");
5503
5504      // Copy the input into the appropriate registers.
5505      if (OpInfo.AssignedRegs.Regs.empty()) {
5506        llvm_report_error("Couldn't allocate input reg for"
5507                          " constraint '"+ OpInfo.ConstraintCode +"'!");
5508      }
5509
5510      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5511                                        SDNodeOrder, Chain, &Flag);
5512
5513      OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
5514                                               DAG, SDNodeOrder,
5515                                               AsmNodeOperands);
5516      break;
5517    }
5518    case InlineAsm::isClobber: {
5519      // Add the clobbered value to the operand list, so that the register
5520      // allocator is aware that the physreg got clobbered.
5521      if (!OpInfo.AssignedRegs.Regs.empty())
5522        OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5523                                                 false, 0, DAG, SDNodeOrder,
5524                                                 AsmNodeOperands);
5525      break;
5526    }
5527    }
5528  }
5529
5530  // Finish up input operands.
5531  AsmNodeOperands[0] = Chain;
5532  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5533
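  // Emit the INLINEASM node.  It produces a chain and a flag result; the flag
  // is threaded through the register copies below so they stay attached to
  // the asm node.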
5534  Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5535                      DAG.getVTList(MVT::Other, MVT::Flag),
5536                      &AsmNodeOperands[0], AsmNodeOperands.size());
5537  Flag = Chain.getValue(1);
5538
5539  // If this asm returns a register value, copy the result from that register
5540  // and set it as the value of the call.
5541  if (!RetValRegs.Regs.empty()) {
5542    SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5543                                             SDNodeOrder, Chain, &Flag);
5544
5545    // FIXME: Why don't we do this for inline asms with MRVs?
5546    if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5547      EVT ResultType = TLI.getValueType(CS.getType());
5548
5549      // If any of the results of the inline asm is a vector, it may have the
5550      // wrong width/num elts.  This can happen for register classes that can
5551      // contain multiple different value types.  The preg or vreg allocated may
5552      // not have the same VT as was expected.  Convert it to the right type
5553      // with bit_convert.
5554      if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
5555        Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5556                          ResultType, Val);
5557
5558      } else if (ResultType != Val.getValueType() &&
5559                 ResultType.isInteger() && Val.getValueType().isInteger()) {
5560        // If a result value was tied to an input value, the computed result may
5561        // have a wider width than the expected result.  Extract the relevant
5562        // portion.
5563        Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
5564      }
5565
5566      assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
5567    }
5568
5569    setValue(CS.getInstruction(), Val);
5570    // Don't need to use this as a chain in this case.
5571    if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
5572      return;
5573  }
5574
5575  std::vector<std::pair<SDValue, Value*> > StoresToEmit;
5576
5577  // Process indirect outputs; first, output all of the flagged copies out of
5578  // physregs.
5579  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5580    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5581    Value *Ptr = IndirectStoresToEmit[i].second;
5582    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5583                                             SDNodeOrder, Chain, &Flag);
5584    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
5585
5586  }
5587
5588  // Emit the non-flagged stores from the physregs.
5589  SmallVector<SDValue, 8> OutChains;
5590  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
5591    SDValue Val = DAG.getStore(Chain, getCurDebugLoc(),
5592                               StoresToEmit[i].first,
5593                               getValue(StoresToEmit[i].second),
5594                               StoresToEmit[i].second, 0);
5595    OutChains.push_back(Val);
5596  }
5597
5598  if (!OutChains.empty())
5599    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
5600                        &OutChains[0], OutChains.size());
5601
5602  DAG.setRoot(Chain);
5603}
5604
5605void SelectionDAGBuilder::visitVAStart(CallInst &I) {
5606  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5607                          MVT::Other, getRoot(),
5608                          getValue(I.getOperand(1)),
5609                          DAG.getSrcValue(I.getOperand(1))));
5610}
5611
5612void SelectionDAGBuilder::visitVAArg(VAArgInst &I) {
5613  SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
5614                           getRoot(), getValue(I.getOperand(0)),
5615                           DAG.getSrcValue(I.getOperand(0)));
5616  setValue(&I, V);
5617  DAG.setRoot(V.getValue(1));
5618}
5619
5620void SelectionDAGBuilder::visitVAEnd(CallInst &I) {
5621  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5622                          MVT::Other, getRoot(),
5623                          getValue(I.getOperand(1)),
5624                          DAG.getSrcValue(I.getOperand(1))));
5625}
5626
5627void SelectionDAGBuilder::visitVACopy(CallInst &I) {
5628  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5629                          MVT::Other, getRoot(),
5630                          getValue(I.getOperand(1)),
5631                          getValue(I.getOperand(2)),
5632                          DAG.getSrcValue(I.getOperand(1)),
5633                          DAG.getSrcValue(I.getOperand(2))));
5634}
5635
5636/// TargetLowering::LowerCallTo - This is the default LowerCallTo
5637/// implementation, which just calls LowerCall.
5638/// FIXME: When all targets are
5639/// migrated to using LowerCall, this hook should be integrated into SDISel.
5640std::pair<SDValue, SDValue>
5641TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
5642                            bool RetSExt, bool RetZExt, bool isVarArg,
5643                            bool isInreg, unsigned NumFixedArgs,
5644                            CallingConv::ID CallConv, bool isTailCall,
5645                            bool isReturnValueUsed,
5646                            SDValue Callee,
5647                            ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl,
5648                            unsigned Order) {
5649  // Handle all of the outgoing arguments.
5650  SmallVector<ISD::OutputArg, 32> Outs;
5651  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5652    SmallVector<EVT, 4> ValueVTs;
5653    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5654    for (unsigned Value = 0, NumValues = ValueVTs.size();
5655         Value != NumValues; ++Value) {
5656      EVT VT = ValueVTs[Value];
5657      const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
5658      SDValue Op = SDValue(Args[i].Node.getNode(),
5659                           Args[i].Node.getResNo() + Value);
5660      ISD::ArgFlagsTy Flags;
5661      unsigned OriginalAlignment =
5662        getTargetData()->getABITypeAlignment(ArgTy);
5663
5664      if (Args[i].isZExt)
5665        Flags.setZExt();
5666      if (Args[i].isSExt)
5667        Flags.setSExt();
5668      if (Args[i].isInReg)
5669        Flags.setInReg();
5670      if (Args[i].isSRet)
5671        Flags.setSRet();
5672      if (Args[i].isByVal) {
5673        Flags.setByVal();
5674        const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5675        const Type *ElementTy = Ty->getElementType();
5676        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5677        unsigned FrameSize  = getTargetData()->getTypeAllocSize(ElementTy);
5678        // For ByVal, the alignment should come from the frontend; the backend
5679        // will guess if it is missing, but there are cases it cannot get right.
5680        if (Args[i].Alignment)
5681          FrameAlign = Args[i].Alignment;
5682        Flags.setByValAlign(FrameAlign);
5683        Flags.setByValSize(FrameSize);
5684      }
5685      if (Args[i].isNest)
5686        Flags.setNest();
5687      Flags.setOrigAlign(OriginalAlignment);
5688
5689      EVT PartVT = getRegisterType(RetTy->getContext(), VT);
5690      unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
5691      SmallVector<SDValue, 4> Parts(NumParts);
5692      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
5693
5694      if (Args[i].isSExt)
5695        ExtendKind = ISD::SIGN_EXTEND;
5696      else if (Args[i].isZExt)
5697        ExtendKind = ISD::ZERO_EXTEND;
5698
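      // Split this argument value into NumParts legal register-sized pieces,
      // extending it first if it was marked signext or zeroext.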
5699      getCopyToParts(DAG, dl, Order, Op, &Parts[0], NumParts,
5700                     PartVT, ExtendKind);
5701
5702      for (unsigned j = 0; j != NumParts; ++j) {
5703        // If this isn't the first piece, the alignment must be 1.
5704        ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
5705        if (NumParts > 1 && j == 0)
5706          MyFlags.Flags.setSplit();
5707        else if (j != 0)
5708          MyFlags.Flags.setOrigAlign(1);
5709
5710        Outs.push_back(MyFlags);
5711      }
5712    }
5713  }
5714
5715  // Handle the incoming return values from the call.
5716  SmallVector<ISD::InputArg, 32> Ins;
5717  SmallVector<EVT, 4> RetTys;
5718  ComputeValueVTs(*this, RetTy, RetTys);
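  // Build one InputArg for each legal register that will carry part of the
  // return value, propagating the sext/zext/inreg attributes.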
5719  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5720    EVT VT = RetTys[I];
5721    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5722    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5723    for (unsigned i = 0; i != NumRegs; ++i) {
5724      ISD::InputArg MyFlags;
5725      MyFlags.VT = RegisterVT;
5726      MyFlags.Used = isReturnValueUsed;
5727      if (RetSExt)
5728        MyFlags.Flags.setSExt();
5729      if (RetZExt)
5730        MyFlags.Flags.setZExt();
5731      if (isInreg)
5732        MyFlags.Flags.setInReg();
5733      Ins.push_back(MyFlags);
5734    }
5735  }
5736
5737  SmallVector<SDValue, 4> InVals;
5738  Chain = LowerCall(Chain, Callee, RetTy, CallConv, isVarArg, isTailCall,
5739                    Outs, Ins, dl, DAG, InVals);
5740
5741  // Verify that the target's LowerCall behaved as expected.
5742  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
5743         "LowerCall didn't return a valid chain!");
5744  assert((!isTailCall || InVals.empty()) &&
5745         "LowerCall emitted a return value for a tail call!");
5746  assert((isTailCall || InVals.size() == Ins.size()) &&
5747         "LowerCall didn't emit the correct number of values!");
5748  DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5749          assert(InVals[i].getNode() &&
5750                 "LowerCall emitted a null value!");
5751          assert(Ins[i].VT == InVals[i].getValueType() &&
5752                 "LowerCall emitted a value with the wrong type!");
5753        });
5754
5755  // For a tail call, the return value is merely live-out and there aren't
5756  // any nodes in the DAG representing it. Return a special value to
5757  // indicate that a tail call has been emitted and no more Instructions
5758  // should be processed in the current block.
5759  if (isTailCall) {
5760    DAG.setRoot(Chain);
5761    return std::make_pair(SDValue(), SDValue());
5762  }
5763
5764  // Collect the legal value parts into potentially illegal values
5765  // that correspond to the original function's return values.
5766  ISD::NodeType AssertOp = ISD::DELETED_NODE;
5767  if (RetSExt)
5768    AssertOp = ISD::AssertSext;
5769  else if (RetZExt)
5770    AssertOp = ISD::AssertZext;
5771  SmallVector<SDValue, 4> ReturnValues;
5772  unsigned CurReg = 0;
5773  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5774    EVT VT = RetTys[I];
5775    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5776    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5777
5778    ReturnValues.push_back(getCopyFromParts(DAG, dl, Order, &InVals[CurReg],
5779                                            NumRegs, RegisterVT, VT,
5780                                            AssertOp));
5781    CurReg += NumRegs;
5782  }
5783
5784  // For a function returning void, there is no return value.  We can't create
5785  // such a node, so we just return a null return value in that case; nothing
5786  // will actually look at the value.
5787  if (ReturnValues.empty())
5788    return std::make_pair(SDValue(), Chain);
5789
5790  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5791                            DAG.getVTList(&RetTys[0], RetTys.size()),
5792                            &ReturnValues[0], ReturnValues.size());
5793  return std::make_pair(Res, Chain);
5794}
5795
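/// LowerOperationWrapper - Call LowerOperation for the given node and, if it
/// produced a result, append it to Results.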
5796void TargetLowering::LowerOperationWrapper(SDNode *N,
5797                                           SmallVectorImpl<SDValue> &Results,
5798                                           SelectionDAG &DAG) {
5799  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
5800  if (Res.getNode())
5801    Results.push_back(Res);
5802}
5803
5804SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
5805  llvm_unreachable("LowerOperation not implemented for this target!");
5806  return SDValue();
5807}
5808
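/// CopyValueToVirtualRegister - Emit copies of the value for V into the
/// virtual registers beginning at Reg, and record the resulting chain as a
/// pending export.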
5809void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5810  SDValue Op = getValue(V);
5811  assert((Op.getOpcode() != ISD::CopyFromReg ||
5812          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5813         "Copy from a reg to the same reg!");
5814  assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5815
5816  RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
5817  SDValue Chain = DAG.getEntryNode();
5818  RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), SDNodeOrder, Chain, 0);
5819  PendingExports.push_back(Chain);
5820}
5821
5822#include "llvm/CodeGen/SelectionDAGISel.h"
5823
5824void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
5825  // If this is the entry block, emit arguments.
5826  Function &F = *LLVMBB->getParent();
5827  SelectionDAG &DAG = SDB->DAG;
5828  SDValue OldRoot = DAG.getRoot();
5829  DebugLoc dl = SDB->getCurDebugLoc();
5830  const TargetData *TD = TLI.getTargetData();
5831  SmallVector<ISD::InputArg, 16> Ins;
5832
5833  // Check whether the function can return without sret-demotion.
5834  SmallVector<EVT, 4> OutVTs;
5835  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
5836  getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
5837                OutVTs, OutsFlags, TLI);
5838  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
5839
5840  FLI.CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
5841                                          OutVTs, OutsFlags, DAG);
5842  if (!FLI.CanLowerReturn) {
5843    // Put in an sret pointer parameter before all the other parameters.
5844    SmallVector<EVT, 1> ValueVTs;
5845    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
5846
5847    // NOTE: Assuming that a pointer will never break down to more than one VT
5848    // or one register.
5849    ISD::ArgFlagsTy Flags;
5850    Flags.setSRet();
5851    EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), ValueVTs[0]);
5852    ISD::InputArg RetArg(Flags, RegisterVT, true);
5853    Ins.push_back(RetArg);
5854  }
5855
5856  // Set up the incoming argument description vector.
5857  unsigned Idx = 1;
5858  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
5859       I != E; ++I, ++Idx) {
5860    SmallVector<EVT, 4> ValueVTs;
5861    ComputeValueVTs(TLI, I->getType(), ValueVTs);
5862    bool isArgValueUsed = !I->use_empty();
5863    for (unsigned Value = 0, NumValues = ValueVTs.size();
5864         Value != NumValues; ++Value) {
5865      EVT VT = ValueVTs[Value];
5866      const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
5867      ISD::ArgFlagsTy Flags;
5868      unsigned OriginalAlignment =
5869        TD->getABITypeAlignment(ArgTy);
5870
5871      if (F.paramHasAttr(Idx, Attribute::ZExt))
5872        Flags.setZExt();
5873      if (F.paramHasAttr(Idx, Attribute::SExt))
5874        Flags.setSExt();
5875      if (F.paramHasAttr(Idx, Attribute::InReg))
5876        Flags.setInReg();
5877      if (F.paramHasAttr(Idx, Attribute::StructRet))
5878        Flags.setSRet();
5879      if (F.paramHasAttr(Idx, Attribute::ByVal)) {
5880        Flags.setByVal();
5881        const PointerType *Ty = cast<PointerType>(I->getType());
5882        const Type *ElementTy = Ty->getElementType();
5883        unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
5884        unsigned FrameSize  = TD->getTypeAllocSize(ElementTy);
5885        // For ByVal, the alignment should be passed from the frontend; the backend
5886        // will guess if it is missing, but there are cases it cannot get right.
5887        if (F.getParamAlignment(Idx))
5888          FrameAlign = F.getParamAlignment(Idx);
5889        Flags.setByValAlign(FrameAlign);
5890        Flags.setByValSize(FrameSize);
5891      }
5892      if (F.paramHasAttr(Idx, Attribute::Nest))
5893        Flags.setNest();
5894      Flags.setOrigAlign(OriginalAlignment);
5895
5896      EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5897      unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5898      for (unsigned i = 0; i != NumRegs; ++i) {
5899        ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
5900        if (NumRegs > 1 && i == 0)
5901          MyFlags.Flags.setSplit();
5902        // If this isn't the first piece, the alignment must be 1.
5903        else if (i > 0)
5904          MyFlags.Flags.setOrigAlign(1);
5905        Ins.push_back(MyFlags);
5906      }
5907    }
5908  }
5909
5910  // Call the target to set up the argument values.
5911  SmallVector<SDValue, 8> InVals;
5912  SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
5913                                             F.isVarArg(), Ins,
5914                                             dl, DAG, InVals);
5915
5916  // Verify that the target's LowerFormalArguments behaved as expected.
5917  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
5918         "LowerFormalArguments didn't return a valid chain!");
5919  assert(InVals.size() == Ins.size() &&
5920         "LowerFormalArguments didn't emit the correct number of values!");
5921  DEBUG({
5922      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5923        assert(InVals[i].getNode() &&
5924               "LowerFormalArguments emitted a null value!");
5925        assert(Ins[i].VT == InVals[i].getValueType() &&
5926               "LowerFormalArguments emitted a value with the wrong type!");
5927      }
5928    });
5929
5930  // Update the DAG with the new chain value resulting from argument lowering.
5931  DAG.setRoot(NewRoot);
5932
5933  // Set up the argument values.
5934  unsigned i = 0;
5935  Idx = 1;
5936  if (!FLI.CanLowerReturn) {
5937    // Create a virtual register for the sret pointer, and put in a copy
5938    // from the sret argument into it.
5939    SmallVector<EVT, 1> ValueVTs;
5940    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
5941    EVT VT = ValueVTs[0];
5942    EVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5943    ISD::NodeType AssertOp = ISD::DELETED_NODE;
5944    SDValue ArgValue = getCopyFromParts(DAG, dl, 0, &InVals[0], 1,
5945                                        RegVT, VT, AssertOp);
5946
5947    MachineFunction& MF = SDB->DAG.getMachineFunction();
5948    MachineRegisterInfo& RegInfo = MF.getRegInfo();
5949    unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT));
5950    FLI.DemoteRegister = SRetReg;
5951    NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(),
5952                                    SRetReg, ArgValue);
5953    DAG.setRoot(NewRoot);
5954
5955    // i indexes lowered arguments.  Bump it past the hidden sret argument.
5956    // Idx indexes LLVM arguments.  Don't touch it.
5957    ++i;
5958  }
5959
5960  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
5961      ++I, ++Idx) {
5962    SmallVector<SDValue, 4> ArgValues;
5963    SmallVector<EVT, 4> ValueVTs;
5964    ComputeValueVTs(TLI, I->getType(), ValueVTs);
5965    unsigned NumValues = ValueVTs.size();
5966    for (unsigned Value = 0; Value != NumValues; ++Value) {
5967      EVT VT = ValueVTs[Value];
5968      EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5969      unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5970
5971      if (!I->use_empty()) {
5972        ISD::NodeType AssertOp = ISD::DELETED_NODE;
5973        if (F.paramHasAttr(Idx, Attribute::SExt))
5974          AssertOp = ISD::AssertSext;
5975        else if (F.paramHasAttr(Idx, Attribute::ZExt))
5976          AssertOp = ISD::AssertZext;
5977
5978        ArgValues.push_back(getCopyFromParts(DAG, dl, 0, &InVals[i],
5979                                             NumParts, PartVT, VT,
5980                                             AssertOp));
5981      }
5982
5983      i += NumParts;
5984    }
5985
5986    if (!I->use_empty()) {
5987      SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues,
5988                                       SDB->getCurDebugLoc());
5989      SDB->setValue(I, Res);
5990
5991      // If this argument is live outside of the entry block, insert a copy from
5992      // wherever we got it to the vreg that other BBs will reference it as.
5993      SDB->CopyToExportRegsIfNeeded(I);
5994    }
5995  }
5996
5997  assert(i == InVals.size() && "Argument register count mismatch!");
5998
5999  // Finally, if the target has anything special to do, allow it to do so.
6000  // FIXME: this should insert code into the DAG!
6001  EmitFunctionEntryCode(F, SDB->DAG.getMachineFunction());
6002}
6003
6004/// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
6005/// ensure constants are generated when needed.  Remember the virtual registers
6006/// that need to be added to the Machine PHI nodes as input.  We cannot just
6007/// directly add them, because expansion might result in multiple MBB's for one
6008/// BB.  As such, the start of the BB might correspond to a different MBB than
6009/// the end.
6010///
6011void
6012SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
6013  TerminatorInst *TI = LLVMBB->getTerminator();
6014
6015  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
6016
6017  // Check successor nodes' PHI nodes that expect a constant to be available
6018  // from this block.
6019  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
6020    BasicBlock *SuccBB = TI->getSuccessor(succ);
6021    if (!isa<PHINode>(SuccBB->begin())) continue;
6022    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
6023
6024    // If this terminator has multiple identical successors (common for
6025    // switches), only handle each succ once.
6026    if (!SuccsHandled.insert(SuccMBB)) continue;
6027
6028    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
6029    PHINode *PN;
6030
6031    // At this point we know that there is a 1-1 correspondence between LLVM PHI
6032    // nodes and Machine PHI nodes, but the incoming operands have not been
6033    // emitted yet.
6034    for (BasicBlock::iterator I = SuccBB->begin();
6035         (PN = dyn_cast<PHINode>(I)); ++I) {
6036      // Ignore dead phi's.
6037      if (PN->use_empty()) continue;
6038
6039      unsigned Reg;
6040      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6041
6042      if (Constant *C = dyn_cast<Constant>(PHIOp)) {
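      // Constant incoming values are materialized at most once per block;
      // ConstantsOut caches the virtual register so later uses share the copy.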
6043        unsigned &RegOut = SDB->ConstantsOut[C];
6044        if (RegOut == 0) {
6045          RegOut = FuncInfo->CreateRegForValue(C);
6046          SDB->CopyValueToVirtualRegister(C, RegOut);
6047        }
6048        Reg = RegOut;
6049      } else {
6050        Reg = FuncInfo->ValueMap[PHIOp];
6051        if (Reg == 0) {
6052          assert(isa<AllocaInst>(PHIOp) &&
6053                 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
6054                 "Didn't codegen value into a register!??");
6055          Reg = FuncInfo->CreateRegForValue(PHIOp);
6056          SDB->CopyValueToVirtualRegister(PHIOp, Reg);
6057        }
6058      }
6059
6060      // Remember that this register needs to be added to the machine PHI node as
6061      // the input for this MBB.
6062      SmallVector<EVT, 4> ValueVTs;
6063      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
6064      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
6065        EVT VT = ValueVTs[vti];
6066        unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
6067        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
6068          SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
6069        Reg += NumRegisters;
6070      }
6071    }
6072  }
6073  SDB->ConstantsOut.clear();
6074}
6075
6076/// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
6077/// supports legal types, and it emits MachineInstrs directly instead of
6078/// creating SelectionDAG nodes.
6079///
6080bool
6081SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
6082                                                      FastISel *F) {
6083  TerminatorInst *TI = LLVMBB->getTerminator();
6084
6085  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
6086  unsigned OrigNumPHINodesToUpdate = SDB->PHINodesToUpdate.size();
6087
6088  // Check successor nodes' PHI nodes that expect a constant to be available
6089  // from this block.
6090  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
6091    BasicBlock *SuccBB = TI->getSuccessor(succ);
6092    if (!isa<PHINode>(SuccBB->begin())) continue;
6093    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
6094
6095    // If this terminator has multiple identical successors (common for
6096    // switches), only handle each succ once.
6097    if (!SuccsHandled.insert(SuccMBB)) continue;
6098
6099    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
6100    PHINode *PN;
6101
6102    // At this point we know that there is a 1-1 correspondence between LLVM PHI
6103    // nodes and Machine PHI nodes, but the incoming operands have not been
6104    // emitted yet.
6105    for (BasicBlock::iterator I = SuccBB->begin();
6106         (PN = dyn_cast<PHINode>(I)); ++I) {
6107      // Ignore dead phi's.
6108      if (PN->use_empty()) continue;
6109
6110      // Only handle legal types. Two interesting things to note here. First,
6111      // by bailing out early, we may leave behind some dead instructions,
6112      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
6113      // own moves. Second, this check is necessary because FastISel doesn't
6114      // use CreateRegForValue to create registers, so it always creates
6115      // exactly one register for each non-void instruction.
6116      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
6117      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
6118        // Promote MVT::i1.
6119        if (VT == MVT::i1)
6120          VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
6121        else {
6122          SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6123          return false;
6124        }
6125      }
6126
6127      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6128
6129      unsigned Reg = F->getRegForValue(PHIOp);
6130      if (Reg == 0) {
6131        SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6132        return false;
6133      }
6134      SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
6135    }
6136  }
6137
6138  return true;
6139}
6140