SelectionDAG.cpp revision 8430a2950c737eef2d6c6d098e265f4ff6e4723b
//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Constants.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CallingConv.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Mutex.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const MVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

static const fltSemantics *MVTToAPFloatSemantics(MVT VT) {
  switch (VT.getSimpleVT()) {
  default: assert(0 && "Unknown FP format");
  case MVT::f32:     return &APFloat::IEEEsingle;
  case MVT::f64:     return &APFloat::IEEEdouble;
  case MVT::f80:     return &APFloat::x87DoubleExtended;
  case MVT::f128:    return &APFloat::IEEEquad;
  case MVT::ppcf128: return &APFloat::PPCDoubleDouble;
  }
}

SelectionDAG::DAGUpdateListener::~DAGUpdateListener() {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(MVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // PPC long double cannot be converted to any other type.
  if (VT == MVT::ppcf128 ||
      &Val.getSemantics() == &APFloat::PPCDoubleDouble)
    return false;

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(*MVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BIT_CONVERT)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements.
  SDValue NotZero = N->getOperand(i);
  if (isa<ConstantSDNode>(NotZero)) {
    if (!cast<ConstantSDNode>(NotZero)->isAllOnesValue())
      return false;
  } else if (isa<ConstantFPSDNode>(NotZero)) {
    if (!cast<ConstantFPSDNode>(NotZero)->getValueAPF().
                bitcastToAPInt().isAllOnesValue())
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}


/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BIT_CONVERT)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (isa<ConstantSDNode>(Zero)) {
    if (!cast<ConstantSDNode>(Zero)->isNullValue())
      return false;
  } else if (isa<ConstantFPSDNode>(Zero)) {
    if (!cast<ConstantFPSDNode>(Zero)->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}


/// isDebugLabel - Return true if the specified node represents a debug
/// label (i.e. ISD::DBG_LABEL or TargetInstrInfo::DBG_LABEL node).
bool ISD::isDebugLabel(const SDNode *N) {
  SDValue Zero;
  if (N->getOpcode() == ISD::DBG_LABEL)
    return true;
  if (N->isMachineOpcode() &&
      N->getMachineOpcode() == TargetInstrInfo::DBG_LABEL)
    return true;
  return false;
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
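/// For example, the swapped form of SETLT is SETGT (X < Y is the same test
/// as Y > X), and SETULE becomes SETUGE; SETEQ and SETNE are unchanged
/// because equality does not depend on operand order.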
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
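/// For example, the inverse of integer SETLT is SETGE, and the inverse of
/// floating-point SETOLT is SETUGE (the inverted condition must also be true
/// when either operand is NaN).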
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}


/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if the result is an unsigned comparison.  Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: assert(0 && "Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)).  This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
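/// For example, for integer operands (X setlt Y) | (X setgt Y) folds to
/// SETNE, and (X setult Y) | (X setugt Y) folds to SETUNE, which is then
/// canonicalized to SETNE.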
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)).  This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
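/// For example, for integer operands (X setult Y) & (X setne Y) combines to
/// SETOLT, which is then canonicalized back to SETULT.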
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

const TargetMachine &SelectionDAG::getTarget() const {
  return MF->getTarget();
}

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  default: break;  // Normal nodes don't need extra info.
  case ISD::ARG_FLAGS:
    ID.AddInteger(cast<ARG_FLAGSSDNode>(N)->getArgFlags().getRawBits());
    break;
  case ISD::TargetConstant:
  case ISD::Constant:
    ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
    break;
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::DBG_STOPPOINT: {
    const DbgStopPointSDNode *DSP = cast<DbgStopPointSDNode>(N);
    ID.AddInteger(DSP->getLine());
    ID.AddInteger(DSP->getColumn());
    ID.AddPointer(DSP->getCompileUnit());
    break;
  }
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::MEMOPERAND: {
    const MachineMemOperand &MO = cast<MemOperandSDNode>(N)->MO;
    MO.Profile(ID);
    break;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->AddSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    break;
  }
  case ISD::CALL: {
    const CallSDNode *Call = cast<CallSDNode>(N);
    ID.AddInteger(Call->getCallingConv());
    ID.AddInteger(Call->isVarArg());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  } // end switch (N->getOpcode())
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries alignment, volatility, indexing mode, and
/// extension/truncation information.
///
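/// The encoding packs ConvType into bits 0-1, the indexed mode AM into bits
/// 2-4, the volatile flag into bit 5, and Log2_32(Alignment) + 1 into the
/// remaining high bits.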
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM,
                     bool isVolatile, unsigned Alignment) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         ((Log2_32(Alignment) + 1) << 6);
}

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Flag)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::DBG_LABEL:
  case ISD::DBG_STOPPOINT:
  case ISD::EH_LABEL:
  case ISD::DECLARE:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Flag)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes,
                                   DAGUpdateListener *UpdateListener) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    if (UpdateListener)
      UpdateListener->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list.  This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N, DAGUpdateListener *UpdateListener){
  SmallVector<SDNode*, 16> DeadNodes(1, N);
  RemoveDeadNodes(DeadNodes, UpdateListener);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it.  This is useful when we're about to delete or repurpose
/// the node.  We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::EntryToken:
    assert(0 && "EntryToken should not be in CSEMaps!");
    return false;
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    MVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT()] != 0;
      ValueTypeNodes[VT.getSimpleVT()] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Flag &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    cerr << "\n";
    assert(0 && "Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N,
                                       DAGUpdateListener *UpdateListener) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one.  This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing, UpdateListener);

      // N is now dead.  Inform the listener if it exists and delete it.
      if (UpdateListener)
        UpdateListener->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it.  Inform a listener if
  // it exists.
  if (UpdateListener)
    UpdateListener->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  return CSEMap.FindNodeOrInsertPos(ID, InsertPos);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  return CSEMap.FindNodeOrInsertPos(ID, InsertPos);
}


/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops,unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  return CSEMap.FindNodeOrInsertPos(ID, InsertPos);
}

/// VerifyNode - Sanity check the given node.  Aborts if it is invalid.
void SelectionDAG::VerifyNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    MVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    MVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I)
      assert((I->getValueType() == EltVT ||
             (EltVT.isInteger() && I->getValueType().isInteger() &&
              EltVT.bitsLE(I->getValueType()))) &&
            "Wrong operand type!");
    break;
  }
  }
}

/// getMVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getMVTAlignment(MVT VT) const {
  const Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::Int8Ty, 0) :
                   VT.getTypeForMVT();

  return TLI.getTargetData()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(TargetLowering &tli, FunctionLoweringInfo &fli)
  : TLI(tli), FLI(fli), DW(0),
    EntryNode(ISD::EntryToken, DebugLoc::getUnknownLoc(),
    getVTList(MVT::Other)), Root(getEntryNode()) {
  AllNodes.push_back(&EntryNode);
}

void SelectionDAG::init(MachineFunction &mf, MachineModuleInfo *mmi,
                        DwarfWriter *dw) {
  MF = &mf;
  MMI = mmi;
  DW = dw;
}

SelectionDAG::~SelectionDAG() {
  allnodes_clear();
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
}

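/// getZeroExtendInReg - Return Op with the bits above VT's width masked off,
/// i.e. the low VT bits of Op zero-extended in-register to Op's full type.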
SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, DebugLoc DL, MVT VT) {
  if (Op.getValueType() == VT) return Op;
  APInt Imm = APInt::getLowBitsSet(Op.getValueSizeInBits(),
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(DebugLoc DL, SDValue Val, MVT VT) {
  MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getConstant(uint64_t Val, MVT VT, bool isT) {
  MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
  assert((EltVT.getSizeInBits() >= 64 ||
         (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const APInt &Val, MVT VT, bool isT) {
  return getConstant(*ConstantInt::get(Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, MVT VT, bool isT) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
  assert(Val.getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");

  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&Val);
  void *IP = 0;
  SDNode *N = NULL;
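  // Look the scalar constant up in the CSE map.  For vector types the scalar
  // node is still uniqued here, but a BUILD_VECTOR splat of it is built below
  // instead of being returned directly.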
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);
  if (!N) {
    N = NodeAllocator.Allocate<ConstantSDNode>();
    new (N) ConstantSDNode(isT, &Val, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
                     VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TLI.getPointerTy(), isTarget);
}


SDValue SelectionDAG::getConstantFP(const APFloat& V, MVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, MVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  MVT EltVT =
    VT.isVector() ? VT.getVectorElementType() : VT;

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);
  if (!N) {
    N = NodeAllocator.Allocate<ConstantFPSDNode>();
    new (N) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME DebugLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
                     VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, MVT VT, bool isTarget) {
  MVT EltVT =
    VT.isVector() ? VT.getVectorElementType() : VT;
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else
    return getConstantFP(APFloat(Val), VT, isTarget);
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV,
                                       MVT VT, int64_t Offset,
                                       bool isTargetGA) {
  unsigned Opc;

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TLI.getPointerTy().getSizeInBits();
  if (BitWidth < 64)
    Offset = (Offset << (64 - BitWidth) >> (64 - BitWidth));

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = NodeAllocator.Allocate<GlobalAddressSDNode>();
  new (N) GlobalAddressSDNode(isTargetGA, GV, VT, Offset);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, MVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = NodeAllocator.Allocate<FrameIndexSDNode>();
  new (N) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, MVT VT, bool isTarget){
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = NodeAllocator.Allocate<JumpTableSDNode>();
  new (N) JumpTableSDNode(JTI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(Constant *C, MVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget) {
  if (Alignment == 0)
    Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = NodeAllocator.Allocate<ConstantPoolSDNode>();
  new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, MVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget) {
  if (Alignment == 0)
    Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->AddSelectionDAGCSEId(ID);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = NodeAllocator.Allocate<ConstantPoolSDNode>();
  new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = NodeAllocator.Allocate<BasicBlockSDNode>();
  new (N) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getArgFlags(ISD::ArgFlagsTy Flags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ARG_FLAGS, getVTList(MVT::Other), 0, 0);
  ID.AddInteger(Flags.getRawBits());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);
  SDNode *N = NodeAllocator.Allocate<ARG_FLAGSSDNode>();
  new (N) ARG_FLAGSSDNode(Flags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(MVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT() >= ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT()+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT()];

  if (N) return SDValue(N, 0);
  N = NodeAllocator.Allocate<VTSDNode>();
  new (N) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, MVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = NodeAllocator.Allocate<ExternalSymbolSDNode>();
  new (N) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, MVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = NodeAllocator.Allocate<ExternalSymbolSDNode>();
  new (N) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = NodeAllocator.Allocate<CondCodeSDNode>();
    new (N) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }
  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - Swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
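// For example, with 4-element vectors the mask <0, 5, 2, 7> becomes
// <4, 1, 6, 3> after commuting.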
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}

SDValue SelectionDAG::getVectorShuffle(MVT VT, DebugLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(N1.getValueType() == N2.getValueType() && "Invalid VECTOR_SHUFFLE");
  assert(VT.isVector() && N1.getValueType().isVector() &&
1162         "Vector Shuffle VTs must be a vectors");
  assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType()
         && "Vector Shuffle VTs must have same element type");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return N1;

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
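  // A negative mask entry denotes an undefined result element.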
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all indices into lhs -> shuffle lhs, undef
  // Canonicalize all indices into rhs -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If this is an identity shuffle, or if every result element is undef,
  // return the appropriate node.
1214  bool AllUndef = true;
1215  bool Identity = true;
1216  for (unsigned i = 0; i != NElts; ++i) {
1217    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
1218    if (MaskVec[i] >= 0) AllUndef = false;
1219  }
1220  if (Identity)
1221    return N1;
1222  if (AllUndef)
1223    return getUNDEF(VT);
1224
1225  FoldingSetNodeID ID;
1226  SDValue Ops[2] = { N1, N2 };
1227  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
1228  for (unsigned i = 0; i != NElts; ++i)
1229    ID.AddInteger(MaskVec[i]);
1230
1231  void* IP = 0;
1232  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1233    return SDValue(E, 0);
1234
1235  // Allocate the mask array for the node out of the BumpPtrAllocator, since
1236  // SDNode doesn't have access to it.  This memory will be "leaked" when
1237  // the node is deallocated, but recovered when the NodeAllocator is released.
1238  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1239  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
1240
1241  ShuffleVectorSDNode *N = NodeAllocator.Allocate<ShuffleVectorSDNode>();
1242  new (N) ShuffleVectorSDNode(VT, dl, N1, N2, MaskAlloc);
1243  CSEMap.InsertNode(N, IP);
1244  AllNodes.push_back(N);
1245  return SDValue(N, 0);
1246}
1247
1248SDValue SelectionDAG::getConvertRndSat(MVT VT, DebugLoc dl,
1249                                       SDValue Val, SDValue DTy,
1250                                       SDValue STy, SDValue Rnd, SDValue Sat,
1251                                       ISD::CvtCode Code) {
1252  // If the src and dest types are the same and the conversion is between
1253  // integer types of the same sign or two floats, no conversion is necessary.
1254  if (DTy == STy &&
1255      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
1256    return Val;
1257
1258  FoldingSetNodeID ID;
1259  void* IP = 0;
1260  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1261    return SDValue(E, 0);
1262  CvtRndSatSDNode *N = NodeAllocator.Allocate<CvtRndSatSDNode>();
1263  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
1264  new (N) CvtRndSatSDNode(VT, dl, Ops, 5, Code);
1265  CSEMap.InsertNode(N, IP);
1266  AllNodes.push_back(N);
1267  return SDValue(N, 0);
1268}
1269
1270SDValue SelectionDAG::getRegister(unsigned RegNo, MVT VT) {
1271  FoldingSetNodeID ID;
1272  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
1273  ID.AddInteger(RegNo);
1274  void *IP = 0;
1275  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1276    return SDValue(E, 0);
1277  SDNode *N = NodeAllocator.Allocate<RegisterSDNode>();
1278  new (N) RegisterSDNode(RegNo, VT);
1279  CSEMap.InsertNode(N, IP);
1280  AllNodes.push_back(N);
1281  return SDValue(N, 0);
1282}
1283
1284SDValue SelectionDAG::getDbgStopPoint(DebugLoc DL, SDValue Root,
1285                                      unsigned Line, unsigned Col,
1286                                      Value *CU) {
1287  SDNode *N = NodeAllocator.Allocate<DbgStopPointSDNode>();
1288  new (N) DbgStopPointSDNode(Root, Line, Col, CU);
1289  N->setDebugLoc(DL);
1290  AllNodes.push_back(N);
1291  return SDValue(N, 0);
1292}
1293
1294SDValue SelectionDAG::getLabel(unsigned Opcode, DebugLoc dl,
1295                               SDValue Root,
1296                               unsigned LabelID) {
1297  FoldingSetNodeID ID;
1298  SDValue Ops[] = { Root };
1299  AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), &Ops[0], 1);
1300  ID.AddInteger(LabelID);
1301  void *IP = 0;
1302  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1303    return SDValue(E, 0);
1304  SDNode *N = NodeAllocator.Allocate<LabelSDNode>();
1305  new (N) LabelSDNode(Opcode, dl, Root, LabelID);
1306  CSEMap.InsertNode(N, IP);
1307  AllNodes.push_back(N);
1308  return SDValue(N, 0);
1309}
1310
1311SDValue SelectionDAG::getSrcValue(const Value *V) {
1312  assert((!V || isa<PointerType>(V->getType())) &&
1313         "SrcValue is not a pointer?");
1314
1315  FoldingSetNodeID ID;
1316  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
1317  ID.AddPointer(V);
1318
1319  void *IP = 0;
1320  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1321    return SDValue(E, 0);
1322
1323  SDNode *N = NodeAllocator.Allocate<SrcValueSDNode>();
1324  new (N) SrcValueSDNode(V);
1325  CSEMap.InsertNode(N, IP);
1326  AllNodes.push_back(N);
1327  return SDValue(N, 0);
1328}
1329
1330SDValue SelectionDAG::getMemOperand(const MachineMemOperand &MO) {
1331#ifndef NDEBUG
1332  const Value *v = MO.getValue();
1333  assert((!v || isa<PointerType>(v->getType())) &&
1334         "SrcValue is not a pointer?");
1335#endif
1336
1337  FoldingSetNodeID ID;
1338  AddNodeIDNode(ID, ISD::MEMOPERAND, getVTList(MVT::Other), 0, 0);
1339  MO.Profile(ID);
1340
1341  void *IP = 0;
1342  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1343    return SDValue(E, 0);
1344
1345  SDNode *N = NodeAllocator.Allocate<MemOperandSDNode>();
1346  new (N) MemOperandSDNode(MO);
1347  CSEMap.InsertNode(N, IP);
1348  AllNodes.push_back(N);
1349  return SDValue(N, 0);
1350}
1351
1352/// getShiftAmountOperand - Return the specified value casted to
1353/// the target's desired shift amount type.
1354SDValue SelectionDAG::getShiftAmountOperand(SDValue Op) {
1355  MVT OpTy = Op.getValueType();
1356  MVT ShTy = TLI.getShiftAmountTy();
1357  if (OpTy == ShTy || OpTy.isVector()) return Op;
1358
1359  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ?  ISD::TRUNCATE : ISD::ZERO_EXTEND;
1360  return getNode(Opcode, Op.getDebugLoc(), ShTy, Op);
1361}
1362
1363/// CreateStackTemporary - Create a stack temporary, suitable for holding the
1364/// specified value type.
1365SDValue SelectionDAG::CreateStackTemporary(MVT VT, unsigned minAlign) {
1366  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1367  unsigned ByteSize = VT.getStoreSizeInBits()/8;
1368  const Type *Ty = VT.getTypeForMVT();
1369  unsigned StackAlign =
1370  std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);
1371
1372  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign);
1373  return getFrameIndex(FrameIdx, TLI.getPointerTy());
1374}
1375
1376/// CreateStackTemporary - Create a stack temporary suitable for holding
1377/// either of the specified value types.
1378SDValue SelectionDAG::CreateStackTemporary(MVT VT1, MVT VT2) {
1379  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
1380                            VT2.getStoreSizeInBits())/8;
1381  const Type *Ty1 = VT1.getTypeForMVT();
1382  const Type *Ty2 = VT2.getTypeForMVT();
1383  const TargetData *TD = TLI.getTargetData();
1384  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
1385                            TD->getPrefTypeAlignment(Ty2));
1386
1387  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1388  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align);
1389  return getFrameIndex(FrameIdx, TLI.getPointerTy());
1390}
1391
1392SDValue SelectionDAG::FoldSetCC(MVT VT, SDValue N1,
1393                                SDValue N2, ISD::CondCode Cond, DebugLoc dl) {
1394  // These setcc operations always fold.
1395  switch (Cond) {
1396  default: break;
1397  case ISD::SETFALSE:
1398  case ISD::SETFALSE2: return getConstant(0, VT);
1399  case ISD::SETTRUE:
1400  case ISD::SETTRUE2:  return getConstant(1, VT);
1401
1402  case ISD::SETOEQ:
1403  case ISD::SETOGT:
1404  case ISD::SETOGE:
1405  case ISD::SETOLT:
1406  case ISD::SETOLE:
1407  case ISD::SETONE:
1408  case ISD::SETO:
1409  case ISD::SETUO:
1410  case ISD::SETUEQ:
1411  case ISD::SETUNE:
1412    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1413    break;
1414  }
1415
1416  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
1417    const APInt &C2 = N2C->getAPIntValue();
1418    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1419      const APInt &C1 = N1C->getAPIntValue();
1420
1421      switch (Cond) {
1422      default: assert(0 && "Unknown integer setcc!");
1423      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
1424      case ISD::SETNE:  return getConstant(C1 != C2, VT);
1425      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
1426      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
1427      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
1428      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
1429      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
1430      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
1431      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
1432      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
1433      }
1434    }
1435  }
1436  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
1437    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
1438      // No compile time operations on this type yet.
1439      if (N1C->getValueType(0) == MVT::ppcf128)
1440        return SDValue();
1441
1442      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1443      switch (Cond) {
1444      default: break;
1445      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
1446                          return getUNDEF(VT);
1447                        // fall through
1448      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
1449      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
1450                          return getUNDEF(VT);
1451                        // fall through
1452      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
1453                                           R==APFloat::cmpLessThan, VT);
1454      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
1455                          return getUNDEF(VT);
1456                        // fall through
1457      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
1458      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
1459                          return getUNDEF(VT);
1460                        // fall through
1461      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
1462      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
1463                          return getUNDEF(VT);
1464                        // fall through
1465      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1466                                           R==APFloat::cmpEqual, VT);
1467      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
1468                          return getUNDEF(VT);
1469                        // fall through
1470      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
1471                                           R==APFloat::cmpEqual, VT);
1472      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
1473      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
1474      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
1475                                           R==APFloat::cmpEqual, VT);
1476      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
1477      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
1478                                           R==APFloat::cmpLessThan, VT);
1479      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
1480                                           R==APFloat::cmpUnordered, VT);
1481      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
1482      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
1483      }
1484    } else {
1485      // Ensure that the constant occurs on the RHS.
1486      return getSetCC(dl, VT, N2, N1, ISD::getSetCCSwappedOperands(Cond));
1487    }
1488  }
1489
1490  // Could not fold it.
1491  return SDValue();
1492}
1493
1494/// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
1495/// use this predicate to simplify operations downstream.
1496bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
1497  unsigned BitWidth = Op.getValueSizeInBits();
1498  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1499}
1500
1501/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
1502/// this predicate to simplify operations downstream.  Mask is known to be zero
1503/// for bits that V cannot have.
1504bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
1505                                     unsigned Depth) const {
1506  APInt KnownZero, KnownOne;
1507  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
1508  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1509  return (KnownZero & Mask) == Mask;
1510}
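// Illustrative (hypothetical) use of MaskedValueIsZero: a caller that wants
// to know whether an i32 value X is a multiple of 4 can test whether the low
// two bits are known to be clear:
//   bool MultipleOf4 =
//     DAG.MaskedValueIsZero(X, APInt::getLowBitsSet(32, 2), 0);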
1511
1512/// ComputeMaskedBits - Determine which of the bits specified in Mask are
1513/// known to be either zero or one and return them in the KnownZero/KnownOne
1514/// bitsets.  This code only analyzes bits in Mask, in order to short-circuit
1515/// processing.
1516void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
1517                                     APInt &KnownZero, APInt &KnownOne,
1518                                     unsigned Depth) const {
1519  unsigned BitWidth = Mask.getBitWidth();
1520  assert(BitWidth == Op.getValueType().getSizeInBits() &&
1521         "Mask size mismatches value type size!");
1522
1523  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
1524  if (Depth == 6 || Mask == 0)
1525    return;  // Limit search depth.
1526
1527  APInt KnownZero2, KnownOne2;
1528
1529  switch (Op.getOpcode()) {
1530  case ISD::Constant:
1531    // We know all of the bits for a constant!
1532    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & Mask;
1533    KnownZero = ~KnownOne & Mask;
1534    return;
1535  case ISD::AND:
1536    // If either the LHS or the RHS are Zero, the result is zero.
1537    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
1538    ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownZero,
1539                      KnownZero2, KnownOne2, Depth+1);
1540    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1541    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1542
1543    // Output known-1 bits are only known if set in both the LHS & RHS.
1544    KnownOne &= KnownOne2;
1545    // Output known-0 bits are known to be clear if zero in either the LHS or RHS.
1546    KnownZero |= KnownZero2;
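    // e.g. if the RHS of an i8 AND is known to have its low four bits clear,
    // the result's low four bits are known clear as well, regardless of what
    // is known about the LHS.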
1547    return;
1548  case ISD::OR:
1549    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
1550    ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownOne,
1551                      KnownZero2, KnownOne2, Depth+1);
1552    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1553    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1554
1555    // Output known-0 bits are only known if clear in both the LHS & RHS.
1556    KnownZero &= KnownZero2;
1557    // Output known-1 bits are known to be set if set in either the LHS or RHS.
1558    KnownOne |= KnownOne2;
1559    return;
1560  case ISD::XOR: {
1561    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
1562    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
1563    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1564    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1565
1566    // Output known-0 bits are known if clear or set in both the LHS & RHS.
1567    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
1568    // Output known-1 bits are set where one operand is known one and the other known zero.
1569    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
1570    KnownZero = KnownZeroOut;
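    // e.g. a bit known one in both operands is known zero in the result,
    // while a bit known one in one operand and known zero in the other is
    // known one in the result.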
1571    return;
1572  }
1573  case ISD::MUL: {
1574    APInt Mask2 = APInt::getAllOnesValue(BitWidth);
1575    ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero, KnownOne, Depth+1);
1576    ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
1577    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1578    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1579
1580    // If low bits are zero in either operand, output low known-0 bits.
1581    // Also compute a conserative estimate for high known-0 bits.
1582    // Also compute a conservative estimate for high known-0 bits.
1583    // interesting case of alignment computation.
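    // e.g. if one operand is known to be a multiple of 4 (two trailing zero
    // bits) and the other a multiple of 8 (three trailing zero bits), the
    // product is a multiple of 32, so at least five trailing bits are zero.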
1584    KnownOne.clear();
1585    unsigned TrailZ = KnownZero.countTrailingOnes() +
1586                      KnownZero2.countTrailingOnes();
1587    unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
1588                               KnownZero2.countLeadingOnes(),
1589                               BitWidth) - BitWidth;
1590
1591    TrailZ = std::min(TrailZ, BitWidth);
1592    LeadZ = std::min(LeadZ, BitWidth);
1593    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
1594                APInt::getHighBitsSet(BitWidth, LeadZ);
1595    KnownZero &= Mask;
1596    return;
1597  }
1598  case ISD::UDIV: {
1599    // For the purposes of computing leading zeros, we can conservatively
1600    // treat a udiv as a logical right shift by the log2 of the largest
1601    // power of 2 known to be no greater than the denominator.
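    // e.g. for i32: if the numerator is known to be < 2^24 (8 leading zeros)
    // and bit 4 of the denominator is known set (denominator >= 16), the
    // quotient is < 2^20, giving at least 12 leading zero bits.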
1602    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
1603    ComputeMaskedBits(Op.getOperand(0),
1604                      AllOnes, KnownZero2, KnownOne2, Depth+1);
1605    unsigned LeadZ = KnownZero2.countLeadingOnes();
1606
1607    KnownOne2.clear();
1608    KnownZero2.clear();
1609    ComputeMaskedBits(Op.getOperand(1),
1610                      AllOnes, KnownZero2, KnownOne2, Depth+1);
1611    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
1612    if (RHSUnknownLeadingOnes != BitWidth)
1613      LeadZ = std::min(BitWidth,
1614                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
1615
1616    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
1617    return;
1618  }
1619  case ISD::SELECT:
1620    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
1621    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
1622    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1623    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1624
1625    // Only known if known in both the LHS and RHS.
1626    KnownOne &= KnownOne2;
1627    KnownZero &= KnownZero2;
1628    return;
1629  case ISD::SELECT_CC:
1630    ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1);
1631    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1);
1632    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1633    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1634
1635    // Only known if known in both the LHS and RHS.
1636    KnownOne &= KnownOne2;
1637    KnownZero &= KnownZero2;
1638    return;
1639  case ISD::SADDO:
1640  case ISD::UADDO:
1641  case ISD::SSUBO:
1642  case ISD::USUBO:
1643  case ISD::SMULO:
1644  case ISD::UMULO:
1645    if (Op.getResNo() != 1)
1646      return;
1647    // The boolean result conforms to getBooleanContents.  Fall through.
1648  case ISD::SETCC:
1649    // If we know the result of a setcc has the top bits zero, use this info.
1650    if (TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent &&
1651        BitWidth > 1)
1652      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1653    return;
1654  case ISD::SHL:
1655    // (shl X, C1) & C2 == 0   iff   (X & (C2 >>u C1)) == 0
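    // e.g. for (shl X, 3) on i8: the known bits of X shift left by three and
    // the low three result bits become known zero; the demanded mask is
    // shifted right by three because result bit i comes from input bit i-3.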
1656    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1657      unsigned ShAmt = SA->getZExtValue();
1658
1659      // If the shift count is an invalid immediate, don't do anything.
1660      if (ShAmt >= BitWidth)
1661        return;
1662
1663      ComputeMaskedBits(Op.getOperand(0), Mask.lshr(ShAmt),
1664                        KnownZero, KnownOne, Depth+1);
1665      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1666      KnownZero <<= ShAmt;
1667      KnownOne  <<= ShAmt;
1668      // low bits known zero.
1669      KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
1670    }
1671    return;
1672  case ISD::SRL:
1673    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1674    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1675      unsigned ShAmt = SA->getZExtValue();
1676
1677      // If the shift count is an invalid immediate, don't do anything.
1678      if (ShAmt >= BitWidth)
1679        return;
1680
1681      ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt),
1682                        KnownZero, KnownOne, Depth+1);
1683      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1684      KnownZero = KnownZero.lshr(ShAmt);
1685      KnownOne  = KnownOne.lshr(ShAmt);
1686
1687      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
1688      KnownZero |= HighBits;  // High bits known zero.
1689    }
1690    return;
1691  case ISD::SRA:
1692    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1693      unsigned ShAmt = SA->getZExtValue();
1694
1695      // If the shift count is an invalid immediate, don't do anything.
1696      if (ShAmt >= BitWidth)
1697        return;
1698
1699      APInt InDemandedMask = (Mask << ShAmt);
1700      // If any of the demanded bits are produced by the sign extension, we also
1701      // demand the input sign bit.
1702      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
1703      if (HighBits.getBoolValue())
1704        InDemandedMask |= APInt::getSignBit(BitWidth);
1705
1706      ComputeMaskedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne,
1707                        Depth+1);
1708      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1709      KnownZero = KnownZero.lshr(ShAmt);
1710      KnownOne  = KnownOne.lshr(ShAmt);
1711
1712      // Handle the sign bits.
1713      APInt SignBit = APInt::getSignBit(BitWidth);
1714      SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.
1715
1716      if (KnownZero.intersects(SignBit)) {
1717        KnownZero |= HighBits;  // New bits are known zero.
1718      } else if (KnownOne.intersects(SignBit)) {
1719        KnownOne  |= HighBits;  // New bits are known one.
1720      }
1721    }
1722    return;
1723  case ISD::SIGN_EXTEND_INREG: {
1724    MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1725    unsigned EBits = EVT.getSizeInBits();
1726
1727    // Sign extension.  Compute the demanded bits in the result that are not
1728    // present in the input.
1729    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits) & Mask;
1730
1731    APInt InSignBit = APInt::getSignBit(EBits);
1732    APInt InputDemandedBits = Mask & APInt::getLowBitsSet(BitWidth, EBits);
1733
1734    // If the sign extended bits are demanded, we know that the sign
1735    // bit is demanded.
1736    InSignBit.zext(BitWidth);
1737    if (NewBits.getBoolValue())
1738      InputDemandedBits |= InSignBit;
1739
1740    ComputeMaskedBits(Op.getOperand(0), InputDemandedBits,
1741                      KnownZero, KnownOne, Depth+1);
1742    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1743
1744    // If the sign bit of the input is known set or clear, then we know the
1745    // top bits of the result.
1746    if (KnownZero.intersects(InSignBit)) {         // Input sign bit known clear
1747      KnownZero |= NewBits;
1748      KnownOne  &= ~NewBits;
1749    } else if (KnownOne.intersects(InSignBit)) {   // Input sign bit known set
1750      KnownOne  |= NewBits;
1751      KnownZero &= ~NewBits;
1752    } else {                              // Input sign bit unknown
1753      KnownZero &= ~NewBits;
1754      KnownOne  &= ~NewBits;
1755    }
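    // e.g. for sign_extend_inreg from i8 in an i32: if bit 7 of the input is
    // known zero, bits 8-31 of the result are known zero; if bit 7 is known
    // one, bits 8-31 are known one; otherwise nothing is known about them.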
1756    return;
1757  }
1758  case ISD::CTTZ:
1759  case ISD::CTLZ:
1760  case ISD::CTPOP: {
1761    unsigned LowBits = Log2_32(BitWidth)+1;
1762    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1763    KnownOne.clear();
1764    return;
1765  }
1766  case ISD::LOAD: {
1767    if (ISD::isZEXTLoad(Op.getNode())) {
1768      LoadSDNode *LD = cast<LoadSDNode>(Op);
1769      MVT VT = LD->getMemoryVT();
1770      unsigned MemBits = VT.getSizeInBits();
1771      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits) & Mask;
1772    }
1773    return;
1774  }
1775  case ISD::ZERO_EXTEND: {
1776    MVT InVT = Op.getOperand(0).getValueType();
1777    unsigned InBits = InVT.getSizeInBits();
1778    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
1779    APInt InMask    = Mask;
1780    InMask.trunc(InBits);
1781    KnownZero.trunc(InBits);
1782    KnownOne.trunc(InBits);
1783    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
1784    KnownZero.zext(BitWidth);
1785    KnownOne.zext(BitWidth);
1786    KnownZero |= NewBits;
1787    return;
1788  }
1789  case ISD::SIGN_EXTEND: {
1790    MVT InVT = Op.getOperand(0).getValueType();
1791    unsigned InBits = InVT.getSizeInBits();
1792    APInt InSignBit = APInt::getSignBit(InBits);
1793    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
1794    APInt InMask = Mask;
1795    InMask.trunc(InBits);
1796
1797    // If any of the sign extended bits are demanded, we know that the sign
1798    // bit is demanded. Temporarily set this bit in the mask for our callee.
1799    if (NewBits.getBoolValue())
1800      InMask |= InSignBit;
1801
1802    KnownZero.trunc(InBits);
1803    KnownOne.trunc(InBits);
1804    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
1805
1806    // Note if the sign bit is known to be zero or one.
1807    bool SignBitKnownZero = KnownZero.isNegative();
1808    bool SignBitKnownOne  = KnownOne.isNegative();
1809    assert(!(SignBitKnownZero && SignBitKnownOne) &&
1810           "Sign bit can't be known to be both zero and one!");
1811
1812    // If the sign bit wasn't actually demanded by our caller, we don't
1813    // want it set in the KnownZero and KnownOne result values. Reset the
1814    // mask and reapply it to the result values.
1815    InMask = Mask;
1816    InMask.trunc(InBits);
1817    KnownZero &= InMask;
1818    KnownOne  &= InMask;
1819
1820    KnownZero.zext(BitWidth);
1821    KnownOne.zext(BitWidth);
1822
1823    // If the sign bit is known zero or one, the top bits match.
1824    if (SignBitKnownZero)
1825      KnownZero |= NewBits;
1826    else if (SignBitKnownOne)
1827      KnownOne  |= NewBits;
1828    return;
1829  }
1830  case ISD::ANY_EXTEND: {
1831    MVT InVT = Op.getOperand(0).getValueType();
1832    unsigned InBits = InVT.getSizeInBits();
1833    APInt InMask = Mask;
1834    InMask.trunc(InBits);
1835    KnownZero.trunc(InBits);
1836    KnownOne.trunc(InBits);
1837    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
1838    KnownZero.zext(BitWidth);
1839    KnownOne.zext(BitWidth);
1840    return;
1841  }
1842  case ISD::TRUNCATE: {
1843    MVT InVT = Op.getOperand(0).getValueType();
1844    unsigned InBits = InVT.getSizeInBits();
1845    APInt InMask = Mask;
1846    InMask.zext(InBits);
1847    KnownZero.zext(InBits);
1848    KnownOne.zext(InBits);
1849    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
1850    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1851    KnownZero.trunc(BitWidth);
1852    KnownOne.trunc(BitWidth);
1853    break;
1854  }
1855  case ISD::AssertZext: {
1856    MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1857    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
1858    ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
1859                      KnownOne, Depth+1);
1860    KnownZero |= (~InMask) & Mask;
1861    return;
1862  }
1863  case ISD::FGETSIGN:
1864    // All bits are zero except the low bit.
1865    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1866    return;
1867
1868  case ISD::SUB: {
1869    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
1870      // We know that the top bits of C-X are clear if X contains fewer bits
1871      // than C (i.e. no wrap-around can happen).  For example, 20-X is
1872      // positive if we can prove that X is >= 0 and < 16.
1873      if (CLHS->getAPIntValue().isNonNegative()) {
1874        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
1875        // NLZ can't be BitWidth because C is non-negative, so C+1 is never zero.
1876        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
1877        ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero2, KnownOne2,
1878                          Depth+1);
1879
1880        // If all of the MaskV bits are known to be zero, then we know the
1881        // output top bits are zero, because we now know that the output is
1882        // from [0-C].
1883        if ((KnownZero2 & MaskV) == MaskV) {
1884          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
1885          // Top bits known zero.
1886          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
1887        }
1888      }
1889    }
1890  }
1891  // fall through
1892  case ISD::ADD: {
1893    // The low output bits are known to be zero in the low clear bits common
1894    // to both the LHS and RHS.  For example, 8+(X<<3) is known to have the
1895    // low 3 bits clear.
1896    APInt Mask2 = APInt::getLowBitsSet(BitWidth, Mask.countTrailingOnes());
1897    ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
1898    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1899    unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
1900
1901    ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero2, KnownOne2, Depth+1);
1902    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1903    KnownZeroOut = std::min(KnownZeroOut,
1904                            KnownZero2.countTrailingOnes());
1905
1906    KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
1907    return;
1908  }
1909  case ISD::SREM:
1910    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1911      const APInt &RA = Rem->getAPIntValue();
1912      if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
1913        APInt LowBits = RA.isStrictlyPositive() ? (RA - 1) : ~RA;
1914        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
1915        ComputeMaskedBits(Op.getOperand(0), Mask2,KnownZero2,KnownOne2,Depth+1);
1916
1917        // If the sign bit of the first operand is zero, the sign bit of
1918        // the result is zero. If the first operand has no one bits below
1919        // the second operand's single 1 bit, its sign will be zero.
1920        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
1921          KnownZero2 |= ~LowBits;
1922
1923        KnownZero |= KnownZero2 & Mask;
1924
1925        assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
1926      }
1927    }
1928    return;
1929  case ISD::UREM: {
1930    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1931      const APInt &RA = Rem->getAPIntValue();
1932      if (RA.isPowerOf2()) {
1933        APInt LowBits = (RA - 1);
1934        APInt Mask2 = LowBits & Mask;
1935        KnownZero |= ~LowBits & Mask;
1936        ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero, KnownOne,Depth+1);
1937        assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
1938        break;
1939      }
1940    }
1941
1942    // Since the result is less than or equal to either operand, any leading
1943    // zero bits in either operand must also exist in the result.
1944    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
1945    ComputeMaskedBits(Op.getOperand(0), AllOnes, KnownZero, KnownOne,
1946                      Depth+1);
1947    ComputeMaskedBits(Op.getOperand(1), AllOnes, KnownZero2, KnownOne2,
1948                      Depth+1);
1949
1950    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
1951                                KnownZero2.countLeadingOnes());
1952    KnownOne.clear();
1953    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
1954    return;
1955  }
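  // Note: the intrinsic case labels below are nested inside the 'if' so that
  // intrinsics always reach the target hook, while other unhandled opcodes
  // only do so when they are target-specific (>= ISD::BUILTIN_OP_END).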
1956  default:
1957    // Allow the target to implement this method for its nodes.
1958    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
1959  case ISD::INTRINSIC_WO_CHAIN:
1960  case ISD::INTRINSIC_W_CHAIN:
1961  case ISD::INTRINSIC_VOID:
1962      TLI.computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne, *this);
1963    }
1964    return;
1965  }
1966}
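// Illustrative reading of the results (hypothetical caller):
//   APInt KnownZero, KnownOne;
//   DAG.ComputeMaskedBits(X, APInt::getAllOnesValue(32), KnownZero, KnownOne, 0);
// A set bit in KnownZero means the corresponding bit of X is certainly 0, a
// set bit in KnownOne means it is certainly 1, and bits clear in both sets
// are unknown; the two sets never intersect.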
1967
1968/// ComputeNumSignBits - Return the number of times the sign bit of the
1969/// register is replicated into the other bits.  We know that at least 1 bit
1970/// is always equal to the sign bit (itself), but other cases can give us
1971/// information.  For example, immediately after an "SRA X, 2", we know that
1972/// the top 3 bits are all equal to each other, so we return 3.
1973unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
1974  MVT VT = Op.getValueType();
1975  assert(VT.isInteger() && "Invalid VT!");
1976  unsigned VTBits = VT.getSizeInBits();
1977  unsigned Tmp, Tmp2;
1978  unsigned FirstAnswer = 1;
1979
1980  if (Depth == 6)
1981    return 1;  // Limit search depth.
1982
1983  switch (Op.getOpcode()) {
1984  default: break;
1985  case ISD::AssertSext:
1986    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
1987    return VTBits-Tmp+1;
1988  case ISD::AssertZext:
1989    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
1990    return VTBits-Tmp;
1991
1992  case ISD::Constant: {
1993    const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
1994    // If negative, return # leading ones.
1995    if (Val.isNegative())
1996      return Val.countLeadingOnes();
1997
1998    // Return # leading zeros.
1999    return Val.countLeadingZeros();
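    // e.g. the i8 constant -16 (0xF0) has four leading ones and hence four
    // sign bits, while the i8 constant 3 has six leading zeros and six.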
2000  }
2001
2002  case ISD::SIGN_EXTEND:
2003    Tmp = VTBits-Op.getOperand(0).getValueType().getSizeInBits();
2004    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2005
2006  case ISD::SIGN_EXTEND_INREG:
2007    // Max of the input and what this extends.
2008    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2009    Tmp = VTBits-Tmp+1;
2010
2011    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2012    return std::max(Tmp, Tmp2);
2013
2014  case ISD::SRA:
2015    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2016    // SRA X, C   -> adds C sign bits.
2017    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2018      Tmp += C->getZExtValue();
2019      if (Tmp > VTBits) Tmp = VTBits;
2020    }
2021    return Tmp;
2022  case ISD::SHL:
2023    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2024      // shl destroys sign bits.
2025      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2026      if (C->getZExtValue() >= VTBits ||      // Bad shift.
2027          C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
2028      return Tmp - C->getZExtValue();
2029    }
2030    break;
2031  case ISD::AND:
2032  case ISD::OR:
2033  case ISD::XOR:    // NOT is handled here.
2034    // Logical binary ops preserve the number of sign bits at the worst.
2035    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2036    if (Tmp != 1) {
2037      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2038      FirstAnswer = std::min(Tmp, Tmp2);
2039      // We computed what we know about the sign bits as our first
2040      // answer. Now proceed to the generic code that uses
2041      // ComputeMaskedBits, and pick whichever answer is better.
2042    }
2043    break;
2044
2045  case ISD::SELECT:
2046    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2047    if (Tmp == 1) return 1;  // Early out.
2048    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2049    return std::min(Tmp, Tmp2);
2050
2051  case ISD::SADDO:
2052  case ISD::UADDO:
2053  case ISD::SSUBO:
2054  case ISD::USUBO:
2055  case ISD::SMULO:
2056  case ISD::UMULO:
2057    if (Op.getResNo() != 1)
2058      break;
2059    // The boolean result conforms to getBooleanContents.  Fall through.
2060  case ISD::SETCC:
2061    // If setcc returns 0/-1, all bits are sign bits.
2062    if (TLI.getBooleanContents() ==
2063        TargetLowering::ZeroOrNegativeOneBooleanContent)
2064      return VTBits;
2065    break;
2066  case ISD::ROTL:
2067  case ISD::ROTR:
2068    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2069      unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2070
2071      // Handle rotate right by N like a rotate left by 32-N.
2072      if (Op.getOpcode() == ISD::ROTR)
2073        RotAmt = (VTBits-RotAmt) & (VTBits-1);
2074
2075      // If we aren't rotating out all of the known-in sign bits, return the
2076      // number that are left.  This handles rotl(sext(x), 1) for example.
2077      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2078      if (Tmp > RotAmt+1) return Tmp-RotAmt;
2079    }
2080    break;
2081  case ISD::ADD:
2082    // Add can have at most one carry bit.  Thus we know that the output
2083    // is, at worst, one more bit than the inputs.
2084    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2085    if (Tmp == 1) return 1;  // Early out.
2086
2087    // Special case decrementing a value (ADD X, -1):
2088    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2089      if (CRHS->isAllOnesValue()) {
2090        APInt KnownZero, KnownOne;
2091        APInt Mask = APInt::getAllOnesValue(VTBits);
2092        ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
2093
2094        // If the input is known to be 0 or 1, the output is 0/-1, which is all
2095        // sign bits set.
2096        if ((KnownZero | APInt(VTBits, 1)) == Mask)
2097          return VTBits;
2098
2099        // If we are subtracting one from a positive number, there is no carry
2100        // out of the result.
2101        if (KnownZero.isNegative())
2102          return Tmp;
2103      }
2104
2105    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2106    if (Tmp2 == 1) return 1;
2107    return std::min(Tmp, Tmp2)-1;
2108    break;
2109
2110  case ISD::SUB:
2111    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2112    if (Tmp2 == 1) return 1;
2113
2114    // Handle NEG.
2115    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2116      if (CLHS->isNullValue()) {
2117        APInt KnownZero, KnownOne;
2118        APInt Mask = APInt::getAllOnesValue(VTBits);
2119        ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
2120        // If the input is known to be 0 or 1, the output is 0/-1, which is all
2121        // sign bits set.
2122        if ((KnownZero | APInt(VTBits, 1)) == Mask)
2123          return VTBits;
2124
2125        // If the input is known to be positive (the sign bit is known clear),
2126        // the output of the NEG has the same number of sign bits as the input.
2127        if (KnownZero.isNegative())
2128          return Tmp2;
2129
2130        // Otherwise, we treat this like a SUB.
2131      }
2132
2133    // Sub can have at most one carry bit.  Thus we know that the output
2134    // is, at worst, one more bit than the inputs.
2135    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2136    if (Tmp == 1) return 1;  // Early out.
2137      return std::min(Tmp, Tmp2)-1;
2138    return std::min(Tmp, Tmp2)-1;
2139  case ISD::TRUNCATE:
2140    // FIXME: it's tricky to do anything useful for this, but it is an important
2141    // case for targets like X86.
2142    break;
2143  }
2144
2145  // Handle LOADX separately here. EXTLOAD case will fallthrough.
2146  if (Op.getOpcode() == ISD::LOAD) {
2147    LoadSDNode *LD = cast<LoadSDNode>(Op);
2148    unsigned ExtType = LD->getExtensionType();
2149    switch (ExtType) {
2150    default: break;
2151    case ISD::SEXTLOAD:    // e.g. i16->i32 sextload: 17 sign bits known
2152      Tmp = LD->getMemoryVT().getSizeInBits();
2153      return VTBits-Tmp+1;
2154    case ISD::ZEXTLOAD:    // e.g. i16->i32 zextload: top 16 bits known zero
2155      Tmp = LD->getMemoryVT().getSizeInBits();
2156      return VTBits-Tmp;
2157    }
2158  }
2159
2160  // Allow the target to implement this method for its nodes.
2161  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2162      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2163      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2164      Op.getOpcode() == ISD::INTRINSIC_VOID) {
2165    unsigned NumBits = TLI.ComputeNumSignBitsForTargetNode(Op, Depth);
2166    if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2167  }
2168
2169  // Finally, if we can prove that the top bits of the result are 0's or 1's,
2170  // use this information.
2171  APInt KnownZero, KnownOne;
2172  APInt Mask = APInt::getAllOnesValue(VTBits);
2173  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
2174
2175  if (KnownZero.isNegative()) {        // sign bit is 0
2176    Mask = KnownZero;
2177  } else if (KnownOne.isNegative()) {  // sign bit is 1;
2178    Mask = KnownOne;
2179  } else {
2180    // Nothing known.
2181    return FirstAnswer;
2182  }
2183
2184  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
2185  // the number of identical bits in the top of the input value.
2186  Mask = ~Mask;
2187  Mask <<= Mask.getBitWidth()-VTBits;
2188  // Return # leading zeros.  We use 'min' here in case Mask was zero before
2189  // shifting; we don't want to return more than VTBits in that case.
2190  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
2191}
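// Illustrative consequence: for an i32 value X, ComputeNumSignBits(X) >= 25
// means bits 31..7 are all copies of the sign bit, i.e. X is the sign
// extension of an i8 value.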
2192
2193
2194bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const {
2195  GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
2196  if (!GA) return false;
2197  if (GA->getOffset() != 0) return false;
2198  GlobalVariable *GV = dyn_cast<GlobalVariable>(GA->getGlobal());
2199  if (!GV) return false;
2200  MachineModuleInfo *MMI = getMachineModuleInfo();
2201  return MMI && MMI->hasDebugInfo();
2202}
2203
2204
2205/// getShuffleScalarElt - Returns the scalar element that will make up the ith
2206/// element of the result of the vector shuffle.
2207SDValue SelectionDAG::getShuffleScalarElt(const ShuffleVectorSDNode *N,
2208                                          unsigned i) {
2209  MVT VT = N->getValueType(0);
2210  DebugLoc dl = N->getDebugLoc();
2211  if (N->getMaskElt(i) < 0)
2212    return getUNDEF(VT.getVectorElementType());
2213  unsigned Index = N->getMaskElt(i);
2214  unsigned NumElems = VT.getVectorNumElements();
2215  SDValue V = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
2216  Index %= NumElems;
2217
2218  if (V.getOpcode() == ISD::BIT_CONVERT) {
2219    V = V.getOperand(0);
2220    MVT VVT = V.getValueType();
2221    if (!VVT.isVector() || VVT.getVectorNumElements() != (unsigned)NumElems)
2222      return SDValue();
2223  }
2224  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
2225    return (Index == 0) ? V.getOperand(0)
2226                        : getUNDEF(VT.getVectorElementType());
2227  if (V.getOpcode() == ISD::BUILD_VECTOR)
2228    return V.getOperand(Index);
2229  if (const ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(V))
2230    return getShuffleScalarElt(SVN, Index);
2231  return SDValue();
2232}
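// Illustrative example: for a 4-element shuffle with mask <2, 6, u, 1>,
// element 1 of the result maps to mask index 6, i.e. element 2 of the second
// operand; if that operand is a BUILD_VECTOR, its operand 2 is returned
// directly.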
2233
2234
2235/// getNode - Gets or creates the specified node.
2236///
2237SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT) {
2238  FoldingSetNodeID ID;
2239  AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
2240  void *IP = 0;
2241  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2242    return SDValue(E, 0);
2243  SDNode *N = NodeAllocator.Allocate<SDNode>();
2244  new (N) SDNode(Opcode, DL, getVTList(VT));
2245  CSEMap.InsertNode(N, IP);
2246
2247  AllNodes.push_back(N);
2248#ifndef NDEBUG
2249  VerifyNode(N);
2250#endif
2251  return SDValue(N, 0);
2252}
2253
2254SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
2255                              MVT VT, SDValue Operand) {
2256  // Constant fold unary operations with an integer constant operand.
2257  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2258    const APInt &Val = C->getAPIntValue();
2259    unsigned BitWidth = VT.getSizeInBits();
2260    switch (Opcode) {
2261    default: break;
2262    case ISD::SIGN_EXTEND:
2263      return getConstant(APInt(Val).sextOrTrunc(BitWidth), VT);
2264    case ISD::ANY_EXTEND:
2265    case ISD::ZERO_EXTEND:
2266    case ISD::TRUNCATE:
2267      return getConstant(APInt(Val).zextOrTrunc(BitWidth), VT);
2268    case ISD::UINT_TO_FP:
2269    case ISD::SINT_TO_FP: {
2270      const uint64_t zero[] = {0, 0};
2271      // No compile time operations on this type.
2272      if (VT==MVT::ppcf128)
2273        break;
2274      APFloat apf = APFloat(APInt(BitWidth, 2, zero));
2275      (void)apf.convertFromAPInt(Val,
2276                                 Opcode==ISD::SINT_TO_FP,
2277                                 APFloat::rmNearestTiesToEven);
2278      return getConstantFP(apf, VT);
2279    }
2280    case ISD::BIT_CONVERT:
2281      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2282        return getConstantFP(Val.bitsToFloat(), VT);
2283      else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2284        return getConstantFP(Val.bitsToDouble(), VT);
2285      break;
2286    case ISD::BSWAP:
2287      return getConstant(Val.byteSwap(), VT);
2288    case ISD::CTPOP:
2289      return getConstant(Val.countPopulation(), VT);
2290    case ISD::CTLZ:
2291      return getConstant(Val.countLeadingZeros(), VT);
2292    case ISD::CTTZ:
2293      return getConstant(Val.countTrailingZeros(), VT);
2294    }
2295  }
2296
2297  // Constant fold unary operations with a floating point constant operand.
2298  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2299    APFloat V = C->getValueAPF();    // make copy
2300    if (VT != MVT::ppcf128 && Operand.getValueType() != MVT::ppcf128) {
2301      switch (Opcode) {
2302      case ISD::FNEG:
2303        V.changeSign();
2304        return getConstantFP(V, VT);
2305      case ISD::FABS:
2306        V.clearSign();
2307        return getConstantFP(V, VT);
2308      case ISD::FP_ROUND:
2309      case ISD::FP_EXTEND: {
2310        bool ignored;
2311        // This can return overflow, underflow, or inexact; we don't care.
2312        // FIXME need to be more flexible about rounding mode.
2313        (void)V.convert(*MVTToAPFloatSemantics(VT),
2314                        APFloat::rmNearestTiesToEven, &ignored);
2315        return getConstantFP(V, VT);
2316      }
2317      case ISD::FP_TO_SINT:
2318      case ISD::FP_TO_UINT: {
2319        integerPart x[2];
2320        bool ignored;
2321        assert(integerPartWidth >= 64);
2322        // FIXME need to be more flexible about rounding mode.
2323        APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2324                              Opcode==ISD::FP_TO_SINT,
2325                              APFloat::rmTowardZero, &ignored);
2326        if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
2327          break;
2328        APInt api(VT.getSizeInBits(), 2, x);
2329        return getConstant(api, VT);
2330      }
2331      case ISD::BIT_CONVERT:
2332        if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2333          return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2334        else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2335          return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
2336        break;
2337      }
2338    }
2339  }
2340
2341  unsigned OpOpcode = Operand.getNode()->getOpcode();
2342  switch (Opcode) {
2343  case ISD::TokenFactor:
2344  case ISD::MERGE_VALUES:
2345  case ISD::CONCAT_VECTORS:
2346    return Operand;         // Factor, merge or concat of one node?  No need.
2347  case ISD::FP_ROUND: assert(0 && "Invalid method to make FP_ROUND node");
2348  case ISD::FP_EXTEND:
2349    assert(VT.isFloatingPoint() &&
2350           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2351    if (Operand.getValueType() == VT) return Operand;  // noop conversion.
2352    if (Operand.getOpcode() == ISD::UNDEF)
2353      return getUNDEF(VT);
2354    break;
2355  case ISD::SIGN_EXTEND:
2356    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2357           "Invalid SIGN_EXTEND!");
2358    if (Operand.getValueType() == VT) return Operand;   // noop extension
2359    assert(Operand.getValueType().bitsLT(VT)
2360           && "Invalid sext node, dst < src!");
2361    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2362      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2363    break;
2364  case ISD::ZERO_EXTEND:
2365    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2366           "Invalid ZERO_EXTEND!");
2367    if (Operand.getValueType() == VT) return Operand;   // noop extension
2368    assert(Operand.getValueType().bitsLT(VT)
2369           && "Invalid zext node, dst < src!");
2370    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
2371      return getNode(ISD::ZERO_EXTEND, DL, VT,
2372                     Operand.getNode()->getOperand(0));
2373    break;
2374  case ISD::ANY_EXTEND:
2375    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2376           "Invalid ANY_EXTEND!");
2377    if (Operand.getValueType() == VT) return Operand;   // noop extension
2378    assert(Operand.getValueType().bitsLT(VT)
2379           && "Invalid anyext node, dst < src!");
2380    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND)
2381      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
2382      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2383    break;
2384  case ISD::TRUNCATE:
2385    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2386           "Invalid TRUNCATE!");
2387    if (Operand.getValueType() == VT) return Operand;   // noop truncate
2388    assert(Operand.getValueType().bitsGT(VT)
2389           && "Invalid truncate node, src < dst!");
2390    if (OpOpcode == ISD::TRUNCATE)
2391      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2392    else if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2393             OpOpcode == ISD::ANY_EXTEND) {
2394      // If the source is smaller than the dest, we still need an extend.
2395      if (Operand.getNode()->getOperand(0).getValueType().bitsLT(VT))
2396        return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2397      else if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2398        return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2399      else
2400        return Operand.getNode()->getOperand(0);
2401    }
2402    break;
2403  case ISD::BIT_CONVERT:
2404    // Basic sanity checking.
2405    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2406           && "Cannot BIT_CONVERT between types of different sizes!");
2407    if (VT == Operand.getValueType()) return Operand;  // noop conversion.
2408    if (OpOpcode == ISD::BIT_CONVERT)  // bitconv(bitconv(x)) -> bitconv(x)
2409      return getNode(ISD::BIT_CONVERT, DL, VT, Operand.getOperand(0));
2410    if (OpOpcode == ISD::UNDEF)
2411      return getUNDEF(VT);
2412    break;
2413  case ISD::SCALAR_TO_VECTOR:
2414    assert(VT.isVector() && !Operand.getValueType().isVector() &&
2415           (VT.getVectorElementType() == Operand.getValueType() ||
2416            (VT.getVectorElementType().isInteger() &&
2417             Operand.getValueType().isInteger() &&
2418             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
2419           "Illegal SCALAR_TO_VECTOR node!");
2420    if (OpOpcode == ISD::UNDEF)
2421      return getUNDEF(VT);
2422    // scalar_to_vector(extract_vector_elt V, 0) -> V; the other elements are undefined.
2423    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
2424        isa<ConstantSDNode>(Operand.getOperand(1)) &&
2425        Operand.getConstantOperandVal(1) == 0 &&
2426        Operand.getOperand(0).getValueType() == VT)
2427      return Operand.getOperand(0);
2428    break;
2429  case ISD::FNEG:
2430    // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
2431    if (UnsafeFPMath && OpOpcode == ISD::FSUB)
2432      return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
2433                     Operand.getNode()->getOperand(0));
2434    if (OpOpcode == ISD::FNEG)  // --X -> X
2435      return Operand.getNode()->getOperand(0);
2436    break;
2437  case ISD::FABS:
2438    if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
2439      return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
2440    break;
2441  }
2442
2443  SDNode *N;
2444  SDVTList VTs = getVTList(VT);
2445  if (VT != MVT::Flag) { // Don't CSE flag producing nodes
2446    FoldingSetNodeID ID;
2447    SDValue Ops[1] = { Operand };
2448    AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
2449    void *IP = 0;
2450    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2451      return SDValue(E, 0);
2452    N = NodeAllocator.Allocate<UnarySDNode>();
2453    new (N) UnarySDNode(Opcode, DL, VTs, Operand);
2454    CSEMap.InsertNode(N, IP);
2455  } else {
2456    N = NodeAllocator.Allocate<UnarySDNode>();
2457    new (N) UnarySDNode(Opcode, DL, VTs, Operand);
2458  }
2459
2460  AllNodes.push_back(N);
2461#ifndef NDEBUG
2462  VerifyNode(N);
2463#endif
2464  return SDValue(N, 0);
2465}
2466
2467SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode,
2468                                             MVT VT,
2469                                             ConstantSDNode *Cst1,
2470                                             ConstantSDNode *Cst2) {
2471  const APInt &C1 = Cst1->getAPIntValue(), &C2 = Cst2->getAPIntValue();
2472
2473  switch (Opcode) {
2474  case ISD::ADD:  return getConstant(C1 + C2, VT);
2475  case ISD::SUB:  return getConstant(C1 - C2, VT);
2476  case ISD::MUL:  return getConstant(C1 * C2, VT);
2477  case ISD::UDIV:
2478    if (C2.getBoolValue()) return getConstant(C1.udiv(C2), VT);
2479    break;
2480  case ISD::UREM:
2481    if (C2.getBoolValue()) return getConstant(C1.urem(C2), VT);
2482    break;
2483  case ISD::SDIV:
2484    if (C2.getBoolValue()) return getConstant(C1.sdiv(C2), VT);
2485    break;
2486  case ISD::SREM:
2487    if (C2.getBoolValue()) return getConstant(C1.srem(C2), VT);
2488    break;
2489  case ISD::AND:  return getConstant(C1 & C2, VT);
2490  case ISD::OR:   return getConstant(C1 | C2, VT);
2491  case ISD::XOR:  return getConstant(C1 ^ C2, VT);
2492  case ISD::SHL:  return getConstant(C1 << C2, VT);
2493  case ISD::SRL:  return getConstant(C1.lshr(C2), VT);
2494  case ISD::SRA:  return getConstant(C1.ashr(C2), VT);
2495  case ISD::ROTL: return getConstant(C1.rotl(C2), VT);
2496  case ISD::ROTR: return getConstant(C1.rotr(C2), VT);
2497  default: break;
2498  }
2499
2500  return SDValue();
2501}
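// e.g. for i8 constants, folding ISD::SRA on -16 (0xF0) and 2 yields -4
// (arithmetic shift), while ISD::SRL on the same operands yields 60 (0x3C).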
2502
2503SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
2504                              SDValue N1, SDValue N2) {
2505  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
2506  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
2507  switch (Opcode) {
2508  default: break;
2509  case ISD::TokenFactor:
2510    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
2511           N2.getValueType() == MVT::Other && "Invalid token factor!");
2512    // Fold trivial token factors.
2513    if (N1.getOpcode() == ISD::EntryToken) return N2;
2514    if (N2.getOpcode() == ISD::EntryToken) return N1;
2515    if (N1 == N2) return N1;
2516    break;
2517  case ISD::CONCAT_VECTORS:
2518    // A CONCAT_VECTORS whose operands are all BUILD_VECTORs can be
2519    // simplified to one big BUILD_VECTOR.
2520    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
2521        N2.getOpcode() == ISD::BUILD_VECTOR) {
2522      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
2523      Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
2524      return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
2525    }
2526    break;
2527  case ISD::AND:
2528    assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
2529           N1.getValueType() == VT && "Binary operator types must match!");
2530    // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
2531    // worth handling here.
2532    if (N2C && N2C->isNullValue())
2533      return N2;
2534    if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
2535      return N1;
2536    break;
2537  case ISD::OR:
2538  case ISD::XOR:
2539  case ISD::ADD:
2540  case ISD::SUB:
2541    assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
2542           N1.getValueType() == VT && "Binary operator types must match!");
2543    // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
2544    // it's worth handling here.
2545    if (N2C && N2C->isNullValue())
2546      return N1;
2547    break;
2548  case ISD::UDIV:
2549  case ISD::UREM:
2550  case ISD::MULHU:
2551  case ISD::MULHS:
2552  case ISD::MUL:
2553  case ISD::SDIV:
2554  case ISD::SREM:
2555    assert(VT.isInteger() && "This operator does not apply to FP types!");
2556    // fall through
2557  case ISD::FADD:
2558  case ISD::FSUB:
2559  case ISD::FMUL:
2560  case ISD::FDIV:
2561  case ISD::FREM:
2562    if (UnsafeFPMath) {
2563      if (Opcode == ISD::FADD) {
2564        // 0+x --> x
2565        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
2566          if (CFP->getValueAPF().isZero())
2567            return N2;
2568        // x+0 --> x
2569        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2570          if (CFP->getValueAPF().isZero())
2571            return N1;
2572      } else if (Opcode == ISD::FSUB) {
2573        // x-0 --> x
2574        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2575          if (CFP->getValueAPF().isZero())
2576            return N1;
2577      }
2578    }
2579    assert(N1.getValueType() == N2.getValueType() &&
2580           N1.getValueType() == VT && "Binary operator types must match!");
2581    break;
2582  case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
2583    assert(N1.getValueType() == VT &&
2584           N1.getValueType().isFloatingPoint() &&
2585           N2.getValueType().isFloatingPoint() &&
2586           "Invalid FCOPYSIGN!");
2587    break;
2588  case ISD::SHL:
2589  case ISD::SRA:
2590  case ISD::SRL:
2591  case ISD::ROTL:
2592  case ISD::ROTR:
2593    assert(VT == N1.getValueType() &&
2594           "Shift operator's return type must be the same as its first arg");
2595    assert(VT.isInteger() && N2.getValueType().isInteger() &&
2596           "Shifts only work on integers");
2597
2598    // Always fold shifts of i1 values so the code generator doesn't need to
2599    // handle them.  Since we know the size of the shift has to be less than the
2600    // size of the value, the shift/rotate count is guaranteed to be zero.
2601    if (VT == MVT::i1)
2602      return N1;
2603    break;
2604  case ISD::FP_ROUND_INREG: {
2605    MVT EVT = cast<VTSDNode>(N2)->getVT();
2606    assert(VT == N1.getValueType() && "Not an inreg round!");
2607    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
2608           "Cannot FP_ROUND_INREG integer types");
2609    assert(EVT.bitsLE(VT) && "Not rounding down!");
2610    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
2611    break;
2612  }
2613  case ISD::FP_ROUND:
2614    assert(VT.isFloatingPoint() &&
2615           N1.getValueType().isFloatingPoint() &&
2616           VT.bitsLE(N1.getValueType()) &&
2617           isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
2618    if (N1.getValueType() == VT) return N1;  // noop conversion.
2619    break;
2620  case ISD::AssertSext:
2621  case ISD::AssertZext: {
2622    MVT EVT = cast<VTSDNode>(N2)->getVT();
2623    assert(VT == N1.getValueType() && "Not an inreg extend!");
2624    assert(VT.isInteger() && EVT.isInteger() &&
2625           "Cannot *_EXTEND_INREG FP types");
2626    assert(EVT.bitsLE(VT) && "Not extending!");
2627    if (VT == EVT) return N1; // noop assertion.
2628    break;
2629  }
2630  case ISD::SIGN_EXTEND_INREG: {
2631    MVT EVT = cast<VTSDNode>(N2)->getVT();
2632    assert(VT == N1.getValueType() && "Not an inreg extend!");
2633    assert(VT.isInteger() && EVT.isInteger() &&
2634           "Cannot *_EXTEND_INREG FP types");
2635    assert(EVT.bitsLE(VT) && "Not extending!");
2636    if (EVT == VT) return N1;  // Not actually extending
2637
2638    if (N1C) {
2639      APInt Val = N1C->getAPIntValue();
2640      unsigned FromBits = cast<VTSDNode>(N2)->getVT().getSizeInBits();
2641      Val <<= Val.getBitWidth()-FromBits;
2642      Val = Val.ashr(Val.getBitWidth()-FromBits);
2643      return getConstant(Val, VT);
2644    }
2645    break;
2646  }
2647  case ISD::EXTRACT_VECTOR_ELT:
2648    // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
2649    if (N1.getOpcode() == ISD::UNDEF)
2650      return getUNDEF(VT);
2651
2652    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
2653    // expanding copies of large vectors from registers.
2654    if (N2C &&
2655        N1.getOpcode() == ISD::CONCAT_VECTORS &&
2656        N1.getNumOperands() > 0) {
2657      unsigned Factor =
2658        N1.getOperand(0).getValueType().getVectorNumElements();
2659      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
2660                     N1.getOperand(N2C->getZExtValue() / Factor),
2661                     getConstant(N2C->getZExtValue() % Factor,
2662                                 N2.getValueType()));
2663    }
2664
2665    // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
2666    // expanding large vector constants.
2667    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
2668      SDValue Elt = N1.getOperand(N2C->getZExtValue());
2669      if (Elt.getValueType() != VT) {
2670        // If the vector element type is not legal, the BUILD_VECTOR operands
2671        // are promoted and implicitly truncated.  Make that explicit here.
2672        assert(VT.isInteger() && Elt.getValueType().isInteger() &&
2673               VT.bitsLE(Elt.getValueType()) &&
2674               "Bad type for BUILD_VECTOR operand");
2675        Elt = getNode(ISD::TRUNCATE, DL, VT, Elt);
2676      }
2677      return Elt;
2678    }
2679
2680    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
2681    // operations are lowered to scalars.
2682    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
2683      // If the indices are the same, return the inserted element.
2684      if (N1.getOperand(2) == N2)
2685        return N1.getOperand(1);
2686      // If the indices are known different, extract the element from
2687      // the original vector.
2688      else if (isa<ConstantSDNode>(N1.getOperand(2)) &&
2689               isa<ConstantSDNode>(N2))
2690        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
2691    }
2692    break;
2693  case ISD::EXTRACT_ELEMENT:
2694    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
2695    assert(!N1.getValueType().isVector() && !VT.isVector() &&
2696           (N1.getValueType().isInteger() == VT.isInteger()) &&
2697           "Wrong types for EXTRACT_ELEMENT!");
2698
2699    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
2700    // 64-bit integers into 32-bit parts.  Instead of building the extract of
2701    // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
2702    if (N1.getOpcode() == ISD::BUILD_PAIR)
2703      return N1.getOperand(N2C->getZExtValue());
2704
2705    // EXTRACT_ELEMENT of a constant int is also very common.
2706    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
2707      unsigned ElementSize = VT.getSizeInBits();
2708      unsigned Shift = ElementSize * N2C->getZExtValue();
2709      APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
2710      return getConstant(ShiftedVal.trunc(ElementSize), VT);
2711    }
2712    break;
2713  case ISD::EXTRACT_SUBVECTOR:
2714    if (N1.getValueType() == VT) // Trivial extraction.
2715      return N1;
2716    break;
2717  }
2718
2719  if (N1C) {
2720    if (N2C) {
2721      SDValue SV = FoldConstantArithmetic(Opcode, VT, N1C, N2C);
2722      if (SV.getNode()) return SV;
2723    } else {      // Canonicalize constant to RHS if commutative
2724      if (isCommutativeBinOp(Opcode)) {
2725        std::swap(N1C, N2C);
2726        std::swap(N1, N2);
2727      }
2728    }
2729  }
2730
2731  // Constant fold FP operations.
2732  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
2733  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
2734  if (N1CFP) {
2735    if (!N2CFP && isCommutativeBinOp(Opcode)) {
2736      // Canonicalize constant to RHS if commutative
2737      std::swap(N1CFP, N2CFP);
2738      std::swap(N1, N2);
2739    } else if (N2CFP && VT != MVT::ppcf128) {
2740      APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
2741      APFloat::opStatus s;
2742      switch (Opcode) {
2743      case ISD::FADD:
2744        s = V1.add(V2, APFloat::rmNearestTiesToEven);
2745        if (s != APFloat::opInvalidOp)
2746          return getConstantFP(V1, VT);
2747        break;
2748      case ISD::FSUB:
2749        s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
2750        if (s!=APFloat::opInvalidOp)
2751          return getConstantFP(V1, VT);
2752        break;
2753      case ISD::FMUL:
2754        s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
2755        if (s!=APFloat::opInvalidOp)
2756          return getConstantFP(V1, VT);
2757        break;
2758      case ISD::FDIV:
2759        s = V1.divide(V2, APFloat::rmNearestTiesToEven);
2760        if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
2761          return getConstantFP(V1, VT);
2762        break;
2763      case ISD::FREM :
2764        s = V1.mod(V2, APFloat::rmNearestTiesToEven);
2765        if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
2766          return getConstantFP(V1, VT);
2767        break;
2768      case ISD::FCOPYSIGN:
2769        V1.copySign(V2);
2770        return getConstantFP(V1, VT);
2771      default: break;
2772      }
2773    }
2774  }
2775
2776  // Canonicalize an UNDEF to the RHS, even over a constant.
2777  if (N1.getOpcode() == ISD::UNDEF) {
2778    if (isCommutativeBinOp(Opcode)) {
2779      std::swap(N1, N2);
2780    } else {
2781      switch (Opcode) {
2782      case ISD::FP_ROUND_INREG:
2783      case ISD::SIGN_EXTEND_INREG:
2784      case ISD::SUB:
2785      case ISD::FSUB:
2786      case ISD::FDIV:
2787      case ISD::FREM:
2788      case ISD::SRA:
2789        return N1;     // fold op(undef, arg2) -> undef
2790      case ISD::UDIV:
2791      case ISD::SDIV:
2792      case ISD::UREM:
2793      case ISD::SREM:
2794      case ISD::SRL:
2795      case ISD::SHL:
2796        if (!VT.isVector())
2797          return getConstant(0, VT);    // fold op(undef, arg2) -> 0
2798        // For vectors, we can't easily build an all zero vector; just return
2799        // the other operand.
2800        return N2;
2801      }
2802    }
2803  }
2804
2805  // Fold a bunch of operators when the RHS is undef.
2806  if (N2.getOpcode() == ISD::UNDEF) {
2807    switch (Opcode) {
2808    case ISD::XOR:
2809      if (N1.getOpcode() == ISD::UNDEF)
2810        // Handle undef ^ undef -> 0 special case. This is a common
2811        // idiom (misuse).
2812        return getConstant(0, VT);
2813      // fallthrough
2814    case ISD::ADD:
2815    case ISD::ADDC:
2816    case ISD::ADDE:
2817    case ISD::SUB:
2818    case ISD::UDIV:
2819    case ISD::SDIV:
2820    case ISD::UREM:
2821    case ISD::SREM:
2822      return N2;       // fold op(arg1, undef) -> undef
2823    case ISD::FADD:
2824    case ISD::FSUB:
2825    case ISD::FMUL:
2826    case ISD::FDIV:
2827    case ISD::FREM:
2828      if (UnsafeFPMath)
2829        return N2;
2830      break;
2831    case ISD::MUL:
2832    case ISD::AND:
2833    case ISD::SRL:
2834    case ISD::SHL:
2835      if (!VT.isVector())
2836        return getConstant(0, VT);  // fold op(arg1, undef) -> 0
2837      // For vectors, we can't easily build an all zero vector; just
2838      // return the LHS.
2839      return N1;
2840    case ISD::OR:
2841      if (!VT.isVector())
2842        return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
2843      // For vectors, we can't easily build an all ones vector; just
2844      // return the LHS.
2845      return N1;
2846    case ISD::SRA:
2847      return N1;
2848    }
2849  }
2850
2851  // Memoize this node if possible.
2852  SDNode *N;
2853  SDVTList VTs = getVTList(VT);
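  // Nodes that produce a flag result are not CSE'd, since a flag value ties
  // the node to a single consumer and must not be shared.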
2854  if (VT != MVT::Flag) {
2855    SDValue Ops[] = { N1, N2 };
2856    FoldingSetNodeID ID;
2857    AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
2858    void *IP = 0;
2859    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2860      return SDValue(E, 0);
2861    N = NodeAllocator.Allocate<BinarySDNode>();
2862    new (N) BinarySDNode(Opcode, DL, VTs, N1, N2);
2863    CSEMap.InsertNode(N, IP);
2864  } else {
2865    N = NodeAllocator.Allocate<BinarySDNode>();
2866    new (N) BinarySDNode(Opcode, DL, VTs, N1, N2);
2867  }
2868
2869  AllNodes.push_back(N);
2870#ifndef NDEBUG
2871  VerifyNode(N);
2872#endif
2873  return SDValue(N, 0);
2874}
2875
2876SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
2877                              SDValue N1, SDValue N2, SDValue N3) {
2878  // Perform various simplifications.
2879  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
2880  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
2881  switch (Opcode) {
2882  case ISD::CONCAT_VECTORS:
2883    // A CONCAT_VECTORS whose operands are all BUILD_VECTORs can be simplified
2884    // to one big BUILD_VECTOR.
2885    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
2886        N2.getOpcode() == ISD::BUILD_VECTOR &&
2887        N3.getOpcode() == ISD::BUILD_VECTOR) {
2888      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
2889      Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
2890      Elts.insert(Elts.end(), N3.getNode()->op_begin(), N3.getNode()->op_end());
2891      return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
2892    }
2893    break;
2894  case ISD::SETCC: {
2895    // Use FoldSetCC to simplify SETCC's.
2896    SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
2897    if (Simp.getNode()) return Simp;
2898    break;
2899  }
2900  case ISD::SELECT:
2901    if (N1C) {
2902      if (N1C->getZExtValue())
2903        return N2;             // select true, X, Y -> X
2904      else
2905        return N3;             // select false, X, Y -> Y
2906    }
2907
2908    if (N2 == N3) return N2;   // select C, X, X -> X
2909    break;
2910  case ISD::BRCOND:
2911    if (N2C) {
2912      if (N2C->getZExtValue()) // Unconditional branch
2913        return getNode(ISD::BR, DL, MVT::Other, N1, N3);
2914      else
2915        return N1;         // Never-taken branch
2916    }
2917    break;
2918  case ISD::VECTOR_SHUFFLE:
2919    assert(0 && "should use getVectorShuffle constructor!");
2920    break;
2921  case ISD::BIT_CONVERT:
2922    // Fold bit_convert nodes from a type to themselves.
2923    if (N1.getValueType() == VT)
2924      return N1;
2925    break;
2926  }
2927
2928  // Memoize node if it doesn't produce a flag.
2929  SDNode *N;
2930  SDVTList VTs = getVTList(VT);
2931  if (VT != MVT::Flag) {
2932    SDValue Ops[] = { N1, N2, N3 };
2933    FoldingSetNodeID ID;
2934    AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
2935    void *IP = 0;
2936    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2937      return SDValue(E, 0);
2938    N = NodeAllocator.Allocate<TernarySDNode>();
2939    new (N) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
2940    CSEMap.InsertNode(N, IP);
2941  } else {
2942    N = NodeAllocator.Allocate<TernarySDNode>();
2943    new (N) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
2944  }
2945  AllNodes.push_back(N);
2946#ifndef NDEBUG
2947  VerifyNode(N);
2948#endif
2949  return SDValue(N, 0);
2950}
2951
2952SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
2953                              SDValue N1, SDValue N2, SDValue N3,
2954                              SDValue N4) {
2955  SDValue Ops[] = { N1, N2, N3, N4 };
2956  return getNode(Opcode, DL, VT, Ops, 4);
2957}
2958
2959SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
2960                              SDValue N1, SDValue N2, SDValue N3,
2961                              SDValue N4, SDValue N5) {
2962  SDValue Ops[] = { N1, N2, N3, N4, N5 };
2963  return getNode(Opcode, DL, VT, Ops, 5);
2964}
2965
2966/// getMemsetValue - Build a value of type VT with the low byte of the memset
2967/// value operand replicated into every byte.
2968static SDValue getMemsetValue(SDValue Value, MVT VT, SelectionDAG &DAG,
2969                              DebugLoc dl) {
2970  unsigned NumBits = VT.isVector() ?
2971    VT.getVectorElementType().getSizeInBits() : VT.getSizeInBits();
2972  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
2973    APInt Val = APInt(NumBits, C->getZExtValue() & 255);
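    // Replicate the low byte across the value: each iteration ORs the value
    // with a copy of itself shifted left by the current replicated width
    // (8, 16, 32, ... bits) until all NumBits are covered.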
2974    unsigned Shift = 8;
2975    for (unsigned i = NumBits; i > 8; i >>= 1) {
2976      Val = (Val << Shift) | Val;
2977      Shift <<= 1;
2978    }
2979    if (VT.isInteger())
2980      return DAG.getConstant(Val, VT);
2981    return DAG.getConstantFP(APFloat(Val), VT);
2982  }
2983
2984  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2985  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
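  // Non-constant case: build the same byte-splat with explicit SHL/OR nodes,
  // doubling the replicated width on each iteration.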
2986  unsigned Shift = 8;
2987  for (unsigned i = NumBits; i > 8; i >>= 1) {
2988    Value = DAG.getNode(ISD::OR, dl, VT,
2989                        DAG.getNode(ISD::SHL, dl, VT, Value,
2990                                    DAG.getConstant(Shift,
2991                                                    TLI.getShiftAmountTy())),
2992                        Value);
2993    Shift <<= 1;
2994  }
2995
2996  return Value;
2997}
2998
2999/// getMemsetStringVal - Similar to getMemsetValue, except this is only used
3000/// when a memcpy is turned into a memset because the source is a constant
3001/// string pointer.
3002static SDValue getMemsetStringVal(MVT VT, DebugLoc dl, SelectionDAG &DAG,
3003                                    const TargetLowering &TLI,
3004                                    std::string &Str, unsigned Offset) {
3005  // Handle vector with all elements zero.
3006  if (Str.empty()) {
3007    if (VT.isInteger())
3008      return DAG.getConstant(0, VT);
3009    unsigned NumElts = VT.getVectorNumElements();
3010    MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3011    return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
3012                       DAG.getConstant(0, MVT::getVectorVT(EltVT, NumElts)));
3013  }
3014
3015  assert(!VT.isVector() && "Can't handle vector type here!");
3016  unsigned NumBits = VT.getSizeInBits();
3017  unsigned MSB = NumBits / 8;
3018  uint64_t Val = 0;
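  // Pack MSB bytes of the string into Val. On little-endian targets the
  // chunk is walked backwards so that the lowest-addressed character ends up
  // in the least significant byte.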
3019  if (TLI.isLittleEndian())
3020    Offset = Offset + MSB - 1;
3021  for (unsigned i = 0; i != MSB; ++i) {
3022    Val = (Val << 8) | (unsigned char)Str[Offset];
3023    Offset += TLI.isLittleEndian() ? -1 : 1;
3024  }
3025  return DAG.getConstant(Val, VT);
3026}
3027
3028/// getMemBasePlusOffset - Returns an ADD node computing Base plus the
3029/// constant Offset, in the value type of Base.
3030static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset,
3031                                      SelectionDAG &DAG) {
3032  MVT VT = Base.getValueType();
3033  return DAG.getNode(ISD::ADD, Base.getDebugLoc(),
3034                     VT, Base, DAG.getConstant(Offset, VT));
3035}
3036
3037/// isMemSrcFromString - Returns true if memcpy source is a string constant.
3038///
3039static bool isMemSrcFromString(SDValue Src, std::string &Str) {
3040  unsigned SrcDelta = 0;
3041  GlobalAddressSDNode *G = NULL;
3042  if (Src.getOpcode() == ISD::GlobalAddress)
3043    G = cast<GlobalAddressSDNode>(Src);
3044  else if (Src.getOpcode() == ISD::ADD &&
3045           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3046           Src.getOperand(1).getOpcode() == ISD::Constant) {
3047    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3048    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3049  }
3050  if (!G)
3051    return false;
3052
3053  GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
3054  if (GV && GetConstantStringInfo(GV, Str, SrcDelta, false))
3055    return true;
3056
3057  return false;
3058}
3059
3060/// MeetsMaxMemopRequirement - Determines if the number of memory ops required
3061/// to replace the memset / memcpy is below the threshold. It also returns the
3062/// types of the sequence of memory ops to perform memset / memcpy.
3063static
3064bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps,
3065                              SDValue Dst, SDValue Src,
3066                              unsigned Limit, uint64_t Size, unsigned &Align,
3067                              std::string &Str, bool &isSrcStr,
3068                              SelectionDAG &DAG,
3069                              const TargetLowering &TLI) {
3070  isSrcStr = isMemSrcFromString(Src, Str);
3071  bool isSrcConst = isa<ConstantSDNode>(Src);
3072  bool AllowUnalign = TLI.allowsUnalignedMemoryAccesses();
3073  MVT VT = TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr, DAG);
3074  if (VT != MVT::iAny) {
3075    unsigned NewAlign = (unsigned)
3076      TLI.getTargetData()->getABITypeAlignment(VT.getTypeForMVT());
3077    // If source is a string constant, this will require an unaligned load.
3078    if (NewAlign > Align && (isSrcConst || AllowUnalign)) {
3079      if (Dst.getOpcode() != ISD::FrameIndex) {
3080        // Can't change destination alignment. It needs an unaligned store.
3081        if (AllowUnalign)
3082          VT = MVT::iAny;
3083      } else {
3084        int FI = cast<FrameIndexSDNode>(Dst)->getIndex();
3085        MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
3086        if (MFI->isFixedObjectIndex(FI)) {
3087          // Can't change destination alignment. It needs an unaligned store.
3088          if (AllowUnalign)
3089            VT = MVT::iAny;
3090        } else {
3091          // Give the stack frame object a larger alignment if needed.
3092          if (MFI->getObjectAlignment(FI) < NewAlign)
3093            MFI->setObjectAlignment(FI, NewAlign);
3094          Align = NewAlign;
3095        }
3096      }
3097    }
3098  }
3099
3100  if (VT == MVT::iAny) {
3101    if (AllowUnalign) {
3102      VT = MVT::i64;
3103    } else {
3104      switch (Align & 7) {
3105      case 0:  VT = MVT::i64; break;
3106      case 4:  VT = MVT::i32; break;
3107      case 2:  VT = MVT::i16; break;
3108      default: VT = MVT::i8;  break;
3109      }
3110    }
3111
3112    MVT LVT = MVT::i64;
3113    while (!TLI.isTypeLegal(LVT))
3114      LVT = (MVT::SimpleValueType)(LVT.getSimpleVT() - 1);
3115    assert(LVT.isInteger());
3116
3117    if (VT.bitsGT(LVT))
3118      VT = LVT;
3119  }
3120
3121  unsigned NumMemOps = 0;
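  // Greedily cover Size bytes with operations of type VT, shrinking VT (and
  // falling back to scalar types) for any tail smaller than VT. Give up if
  // more than Limit memory operations would be needed.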
3122  while (Size != 0) {
3123    unsigned VTSize = VT.getSizeInBits() / 8;
3124    while (VTSize > Size) {
3125      // For now, only use non-vector loads / stores for the left-over pieces.
3126      if (VT.isVector()) {
3127        VT = MVT::i64;
3128        while (!TLI.isTypeLegal(VT))
3129          VT = (MVT::SimpleValueType)(VT.getSimpleVT() - 1);
3130        VTSize = VT.getSizeInBits() / 8;
3131      } else {
3132        // This can result in a type that is not legal on the target, e.g.
3133        // 1 or 2 bytes on PPC.
3134        VT = (MVT::SimpleValueType)(VT.getSimpleVT() - 1);
3135        VTSize >>= 1;
3136      }
3137    }
3138
3139    if (++NumMemOps > Limit)
3140      return false;
3141    MemOps.push_back(VT);
3142    Size -= VTSize;
3143  }
3144
3145  return true;
3146}
3147
3148static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
3149                                         SDValue Chain, SDValue Dst,
3150                                         SDValue Src, uint64_t Size,
3151                                         unsigned Align, bool AlwaysInline,
3152                                         const Value *DstSV, uint64_t DstSVOff,
3153                                         const Value *SrcSV, uint64_t SrcSVOff){
3154  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3155
3156  // Expand memcpy to a series of load and store ops if the size operand falls
3157  // below a certain threshold.
3158  std::vector<MVT> MemOps;
3159  uint64_t Limit = -1ULL;
3160  if (!AlwaysInline)
3161    Limit = TLI.getMaxStoresPerMemcpy();
3162  unsigned DstAlign = Align;  // Destination alignment can change.
3163  std::string Str;
3164  bool CopyFromStr;
3165  if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign,
3166                                Str, CopyFromStr, DAG, TLI))
3167    return SDValue();
3168
3169
3170  bool isZeroStr = CopyFromStr && Str.empty();
3171  SmallVector<SDValue, 8> OutChains;
3172  unsigned NumMemOps = MemOps.size();
3173  uint64_t SrcOff = 0, DstOff = 0;
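  // Emit one load/store pair (or a direct constant store when copying from a
  // string) per entry in MemOps, advancing both offsets by the chunk size;
  // the store chains are joined by a single TokenFactor below.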
3174  for (unsigned i = 0; i < NumMemOps; i++) {
3175    MVT VT = MemOps[i];
3176    unsigned VTSize = VT.getSizeInBits() / 8;
3177    SDValue Value, Store;
3178
3179    if (CopyFromStr && (isZeroStr || !VT.isVector())) {
3180      // It's unlikely a store of a vector immediate can be done in a single
3181      // instruction. It would require a load from a constant pool first.
3182      // We also handle storing a vector with all zeros.
3183      // FIXME: Handle other cases where store of vector immediate is done in
3184      // a single instruction.
3185      Value = getMemsetStringVal(VT, dl, DAG, TLI, Str, SrcOff);
3186      Store = DAG.getStore(Chain, dl, Value,
3187                           getMemBasePlusOffset(Dst, DstOff, DAG),
3188                           DstSV, DstSVOff + DstOff, false, DstAlign);
3189    } else {
3190      // The type might not be legal for the target.  This should only happen
3191      // if the type is smaller than a legal type, as on PPC, so the right
3192      // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
3193      // to Load/Store if NVT==VT.
3194      // FIXME does the case above also need this?
3195      MVT NVT = TLI.getTypeToTransformTo(VT);
3196      assert(NVT.bitsGE(VT));
3197      Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3198                             getMemBasePlusOffset(Src, SrcOff, DAG),
3199                             SrcSV, SrcSVOff + SrcOff, VT, false, Align);
3200      Store = DAG.getTruncStore(Chain, dl, Value,
3201                             getMemBasePlusOffset(Dst, DstOff, DAG),
3202                             DstSV, DstSVOff + DstOff, VT, false, DstAlign);
3203    }
3204    OutChains.push_back(Store);
3205    SrcOff += VTSize;
3206    DstOff += VTSize;
3207  }
3208
3209  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3210                     &OutChains[0], OutChains.size());
3211}
3212
3213static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
3214                                          SDValue Chain, SDValue Dst,
3215                                          SDValue Src, uint64_t Size,
3216                                          unsigned Align, bool AlwaysInline,
3217                                          const Value *DstSV, uint64_t DstSVOff,
3218                                          const Value *SrcSV, uint64_t SrcSVOff){
3219  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3220
3221  // Expand memmove to a series of load and store ops if the size operand falls
3222  // below a certain threshold.
3223  std::vector<MVT> MemOps;
3224  uint64_t Limit = -1ULL;
3225  if (!AlwaysInline)
3226    Limit = TLI.getMaxStoresPerMemmove();
3227  unsigned DstAlign = Align;  // Destination alignment can change.
3228  std::string Str;
3229  bool CopyFromStr;
3230  if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign,
3231                                Str, CopyFromStr, DAG, TLI))
3232    return SDValue();
3233
3234  uint64_t SrcOff = 0, DstOff = 0;
3235
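  // Unlike memcpy, the source and destination of a memmove may overlap, so
  // emit all of the loads first, merge their chains with a TokenFactor, and
  // only then emit the stores.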
3236  SmallVector<SDValue, 8> LoadValues;
3237  SmallVector<SDValue, 8> LoadChains;
3238  SmallVector<SDValue, 8> OutChains;
3239  unsigned NumMemOps = MemOps.size();
3240  for (unsigned i = 0; i < NumMemOps; i++) {
3241    MVT VT = MemOps[i];
3242    unsigned VTSize = VT.getSizeInBits() / 8;
3243    SDValue Value, Store;
3244
3245    Value = DAG.getLoad(VT, dl, Chain,
3246                        getMemBasePlusOffset(Src, SrcOff, DAG),
3247                        SrcSV, SrcSVOff + SrcOff, false, Align);
3248    LoadValues.push_back(Value);
3249    LoadChains.push_back(Value.getValue(1));
3250    SrcOff += VTSize;
3251  }
3252  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3253                      &LoadChains[0], LoadChains.size());
3254  OutChains.clear();
3255  for (unsigned i = 0; i < NumMemOps; i++) {
3256    MVT VT = MemOps[i];
3257    unsigned VTSize = VT.getSizeInBits() / 8;
3258    SDValue Value, Store;
3259
3260    Store = DAG.getStore(Chain, dl, LoadValues[i],
3261                         getMemBasePlusOffset(Dst, DstOff, DAG),
3262                         DstSV, DstSVOff + DstOff, false, DstAlign);
3263    OutChains.push_back(Store);
3264    DstOff += VTSize;
3265  }
3266
3267  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3268                     &OutChains[0], OutChains.size());
3269}
3270
3271static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
3272                                 SDValue Chain, SDValue Dst,
3273                                 SDValue Src, uint64_t Size,
3274                                 unsigned Align,
3275                                 const Value *DstSV, uint64_t DstSVOff) {
3276  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3277
3278  // Expand memset to a series of load/store ops if the size operand
3279  // falls below a certain threshold.
3280  std::vector<MVT> MemOps;
3281  std::string Str;
3282  bool CopyFromStr;
3283  if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, TLI.getMaxStoresPerMemset(),
3284                                Size, Align, Str, CopyFromStr, DAG, TLI))
3285    return SDValue();
3286
3287  SmallVector<SDValue, 8> OutChains;
3288  uint64_t DstOff = 0;
3289
3290  unsigned NumMemOps = MemOps.size();
3291  for (unsigned i = 0; i < NumMemOps; i++) {
3292    MVT VT = MemOps[i];
3293    unsigned VTSize = VT.getSizeInBits() / 8;
3294    SDValue Value = getMemsetValue(Src, VT, DAG, dl);
3295    SDValue Store = DAG.getStore(Chain, dl, Value,
3296                                 getMemBasePlusOffset(Dst, DstOff, DAG),
3297                                 DstSV, DstSVOff + DstOff);
3298    OutChains.push_back(Store);
3299    DstOff += VTSize;
3300  }
3301
3302  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3303                     &OutChains[0], OutChains.size());
3304}
3305
3306SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
3307                                SDValue Src, SDValue Size,
3308                                unsigned Align, bool AlwaysInline,
3309                                const Value *DstSV, uint64_t DstSVOff,
3310                                const Value *SrcSV, uint64_t SrcSVOff) {
3311
3312  // Check to see if we should lower the memcpy to loads and stores first.
3313  // For cases within the target-specified limits, this is the best choice.
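  // Illustrative only (operand values are hypothetical): a 32-byte,
  // 8-byte-aligned memcpy intrinsic would reach here with Size wrapping a
  // ConstantSDNode of 32 and Align == 8, and would normally be expanded by
  // the load/store path below.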
3314  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3315  if (ConstantSize) {
3316    // Memcpy with size zero? Just return the original chain.
3317    if (ConstantSize->isNullValue())
3318      return Chain;
3319
3320    SDValue Result =
3321      getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3322                              ConstantSize->getZExtValue(),
3323                              Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
3324    if (Result.getNode())
3325      return Result;
3326  }
3327
3328  // Then check to see if we should lower the memcpy with target-specific
3329  // code. If the target chooses to do this, this is the next best.
3330  SDValue Result =
3331    TLI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
3332                                AlwaysInline,
3333                                DstSV, DstSVOff, SrcSV, SrcSVOff);
3334  if (Result.getNode())
3335    return Result;
3336
3337  // If we really need inline code and the target declined to provide it,
3338  // use a (potentially long) sequence of loads and stores.
3339  if (AlwaysInline) {
3340    assert(ConstantSize && "AlwaysInline requires a constant size!");
3341    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3342                                   ConstantSize->getZExtValue(), Align, true,
3343                                   DstSV, DstSVOff, SrcSV, SrcSVOff);
3344  }
3345
3346  // Emit a library call.
3347  TargetLowering::ArgListTy Args;
3348  TargetLowering::ArgListEntry Entry;
3349  Entry.Ty = TLI.getTargetData()->getIntPtrType();
3350  Entry.Node = Dst; Args.push_back(Entry);
3351  Entry.Node = Src; Args.push_back(Entry);
3352  Entry.Node = Size; Args.push_back(Entry);
3353  // FIXME: pass in DebugLoc
3354  std::pair<SDValue,SDValue> CallResult =
3355    TLI.LowerCallTo(Chain, Type::VoidTy,
3356                    false, false, false, false, CallingConv::C, false,
3357                    getExternalSymbol("memcpy", TLI.getPointerTy()),
3358                    Args, *this, dl);
3359  return CallResult.second;
3360}
3361
3362SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
3363                                 SDValue Src, SDValue Size,
3364                                 unsigned Align,
3365                                 const Value *DstSV, uint64_t DstSVOff,
3366                                 const Value *SrcSV, uint64_t SrcSVOff) {
3367
3368  // Check to see if we should lower the memmove to loads and stores first.
3369  // For cases within the target-specified limits, this is the best choice.
3370  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3371  if (ConstantSize) {
3372    // Memmove with size zero? Just return the original chain.
3373    if (ConstantSize->isNullValue())
3374      return Chain;
3375
3376    SDValue Result =
3377      getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
3378                               ConstantSize->getZExtValue(),
3379                               Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
3380    if (Result.getNode())
3381      return Result;
3382  }
3383
3384  // Then check to see if we should lower the memmove with target-specific
3385  // code. If the target chooses to do this, this is the next best.
3386  SDValue Result =
3387    TLI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align,
3388                                 DstSV, DstSVOff, SrcSV, SrcSVOff);
3389  if (Result.getNode())
3390    return Result;
3391
3392  // Emit a library call.
3393  TargetLowering::ArgListTy Args;
3394  TargetLowering::ArgListEntry Entry;
3395  Entry.Ty = TLI.getTargetData()->getIntPtrType();
3396  Entry.Node = Dst; Args.push_back(Entry);
3397  Entry.Node = Src; Args.push_back(Entry);
3398  Entry.Node = Size; Args.push_back(Entry);
3399  // FIXME: pass in DebugLoc
3400  std::pair<SDValue,SDValue> CallResult =
3401    TLI.LowerCallTo(Chain, Type::VoidTy,
3402                    false, false, false, false, CallingConv::C, false,
3403                    getExternalSymbol("memmove", TLI.getPointerTy()),
3404                    Args, *this, dl);
3405  return CallResult.second;
3406}
3407
3408SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
3409                                SDValue Src, SDValue Size,
3410                                unsigned Align,
3411                                const Value *DstSV, uint64_t DstSVOff) {
3412
3413  // Check to see if we should lower the memset to stores first.
3414  // For cases within the target-specified limits, this is the best choice.
3415  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3416  if (ConstantSize) {
3417    // Memset with size zero? Just return the original chain.
3418    if (ConstantSize->isNullValue())
3419      return Chain;
3420
3421    SDValue Result =
3422      getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
3423                      Align, DstSV, DstSVOff);
3424    if (Result.getNode())
3425      return Result;
3426  }
3427
3428  // Then check to see if we should lower the memset with target-specific
3429  // code. If the target chooses to do this, this is the next best.
3430  SDValue Result =
3431    TLI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align,
3432                                DstSV, DstSVOff);
3433  if (Result.getNode())
3434    return Result;
3435
3436  // Emit a library call.
3437  const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType();
3438  TargetLowering::ArgListTy Args;
3439  TargetLowering::ArgListEntry Entry;
3440  Entry.Node = Dst; Entry.Ty = IntPtrTy;
3441  Args.push_back(Entry);
3442  // Extend or truncate the argument to be an i32 value for the call.
3443  if (Src.getValueType().bitsGT(MVT::i32))
3444    Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
3445  else
3446    Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
3447  Entry.Node = Src; Entry.Ty = Type::Int32Ty; Entry.isSExt = true;
3448  Args.push_back(Entry);
3449  Entry.Node = Size; Entry.Ty = IntPtrTy; Entry.isSExt = false;
3450  Args.push_back(Entry);
3451  // FIXME: pass in DebugLoc
3452  std::pair<SDValue,SDValue> CallResult =
3453    TLI.LowerCallTo(Chain, Type::VoidTy,
3454                    false, false, false, false, CallingConv::C, false,
3455                    getExternalSymbol("memset", TLI.getPointerTy()),
3456                    Args, *this, dl);
3457  return CallResult.second;
3458}
3459
3460SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT,
3461                                SDValue Chain,
3462                                SDValue Ptr, SDValue Cmp,
3463                                SDValue Swp, const Value* PtrVal,
3464                                unsigned Alignment) {
3465  assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
3466  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
3467
3468  MVT VT = Cmp.getValueType();
3469
3470  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3471    Alignment = getMVTAlignment(MemVT);
3472
3473  SDVTList VTs = getVTList(VT, MVT::Other);
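  // Include the memory VT in the profile so that otherwise identical atomic
  // nodes operating on different memory types are not CSE'd together.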
3474  FoldingSetNodeID ID;
3475  ID.AddInteger(MemVT.getRawBits());
3476  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
3477  AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
3478  void* IP = 0;
3479  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3480    return SDValue(E, 0);
3481  SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
3482  new (N) AtomicSDNode(Opcode, dl, VTs, MemVT,
3483                       Chain, Ptr, Cmp, Swp, PtrVal, Alignment);
3484  CSEMap.InsertNode(N, IP);
3485  AllNodes.push_back(N);
3486  return SDValue(N, 0);
3487}
3488
3489SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT,
3490                                SDValue Chain,
3491                                SDValue Ptr, SDValue Val,
3492                                const Value* PtrVal,
3493                                unsigned Alignment) {
3494  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
3495          Opcode == ISD::ATOMIC_LOAD_SUB ||
3496          Opcode == ISD::ATOMIC_LOAD_AND ||
3497          Opcode == ISD::ATOMIC_LOAD_OR ||
3498          Opcode == ISD::ATOMIC_LOAD_XOR ||
3499          Opcode == ISD::ATOMIC_LOAD_NAND ||
3500          Opcode == ISD::ATOMIC_LOAD_MIN ||
3501          Opcode == ISD::ATOMIC_LOAD_MAX ||
3502          Opcode == ISD::ATOMIC_LOAD_UMIN ||
3503          Opcode == ISD::ATOMIC_LOAD_UMAX ||
3504          Opcode == ISD::ATOMIC_SWAP) &&
3505         "Invalid Atomic Op");
3506
3507  MVT VT = Val.getValueType();
3508
3509  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3510    Alignment = getMVTAlignment(MemVT);
3511
3512  SDVTList VTs = getVTList(VT, MVT::Other);
3513  FoldingSetNodeID ID;
3514  ID.AddInteger(MemVT.getRawBits());
3515  SDValue Ops[] = {Chain, Ptr, Val};
3516  AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3517  void* IP = 0;
3518  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3519    return SDValue(E, 0);
3520  SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
3521  new (N) AtomicSDNode(Opcode, dl, VTs, MemVT,
3522                       Chain, Ptr, Val, PtrVal, Alignment);
3523  CSEMap.InsertNode(N, IP);
3524  AllNodes.push_back(N);
3525  return SDValue(N, 0);
3526}
3527
3528/// getMergeValues - Create a MERGE_VALUES node from the given operands.
3529/// May return something simpler (e.g. the sole operand) instead.
3530SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
3531                                     DebugLoc dl) {
3532  if (NumOps == 1)
3533    return Ops[0];
3534
3535  SmallVector<MVT, 4> VTs;
3536  VTs.reserve(NumOps);
3537  for (unsigned i = 0; i < NumOps; ++i)
3538    VTs.push_back(Ops[i].getValueType());
3539  return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
3540                 Ops, NumOps);
3541}
3542
3543SDValue
3544SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
3545                                  const MVT *VTs, unsigned NumVTs,
3546                                  const SDValue *Ops, unsigned NumOps,
3547                                  MVT MemVT, const Value *srcValue, int SVOff,
3548                                  unsigned Align, bool Vol,
3549                                  bool ReadMem, bool WriteMem) {
3550  return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
3551                             MemVT, srcValue, SVOff, Align, Vol,
3552                             ReadMem, WriteMem);
3553}
3554
3555SDValue
3556SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
3557                                  const SDValue *Ops, unsigned NumOps,
3558                                  MVT MemVT, const Value *srcValue, int SVOff,
3559                                  unsigned Align, bool Vol,
3560                                  bool ReadMem, bool WriteMem) {
3561  // Memoize the node unless it returns a flag.
3562  MemIntrinsicSDNode *N;
3563  if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
3564    FoldingSetNodeID ID;
3565    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
3566    void *IP = 0;
3567    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3568      return SDValue(E, 0);
3569
3570    N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
3571    new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT,
3572                               srcValue, SVOff, Align, Vol, ReadMem, WriteMem);
3573    CSEMap.InsertNode(N, IP);
3574  } else {
3575    N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
3576    new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT,
3577                               srcValue, SVOff, Align, Vol, ReadMem, WriteMem);
3578  }
3579  AllNodes.push_back(N);
3580  return SDValue(N, 0);
3581}
3582
3583SDValue
3584SelectionDAG::getCall(unsigned CallingConv, DebugLoc dl, bool IsVarArgs,
3585                      bool IsTailCall, bool IsInreg, SDVTList VTs,
3586                      const SDValue *Operands, unsigned NumOperands) {
3587  // Do not include isTailCall in the folding set profile.
3588  FoldingSetNodeID ID;
3589  AddNodeIDNode(ID, ISD::CALL, VTs, Operands, NumOperands);
3590  ID.AddInteger(CallingConv);
3591  ID.AddInteger(IsVarArgs);
3592  void *IP = 0;
3593  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
3594    // Instead of including isTailCall in the folding set, we just
3595    // set the flag of the existing node.
3596    if (!IsTailCall)
3597      cast<CallSDNode>(E)->setNotTailCall();
3598    return SDValue(E, 0);
3599  }
3600  SDNode *N = NodeAllocator.Allocate<CallSDNode>();
3601  new (N) CallSDNode(CallingConv, dl, IsVarArgs, IsTailCall, IsInreg,
3602                     VTs, Operands, NumOperands);
3603  CSEMap.InsertNode(N, IP);
3604  AllNodes.push_back(N);
3605  return SDValue(N, 0);
3606}
3607
3608SDValue
3609SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
3610                      ISD::LoadExtType ExtType, MVT VT, SDValue Chain,
3611                      SDValue Ptr, SDValue Offset,
3612                      const Value *SV, int SVOffset, MVT EVT,
3613                      bool isVolatile, unsigned Alignment) {
3614  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3615    Alignment = getMVTAlignment(VT);
3616
3617  if (VT == EVT) {
3618    ExtType = ISD::NON_EXTLOAD;
3619  } else if (ExtType == ISD::NON_EXTLOAD) {
3620    assert(VT == EVT && "Non-extending load from different memory type!");
3621  } else {
3622    // Extending load.
3623    if (VT.isVector())
3624      assert(EVT.getVectorNumElements() == VT.getVectorNumElements() &&
3625             "Invalid vector extload!");
3626    else
3627      assert(EVT.bitsLT(VT) &&
3628             "Should only be an extending load, not truncating!");
3629    assert((ExtType == ISD::EXTLOAD || VT.isInteger()) &&
3630           "Cannot sign/zero extend a FP/Vector load!");
3631    assert(VT.isInteger() == EVT.isInteger() &&
3632           "Cannot convert from FP to Int or Int -> FP!");
3633  }
3634
3635  bool Indexed = AM != ISD::UNINDEXED;
3636  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
3637         "Unindexed load with an offset!");
3638
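  // Indexed loads produce an extra result, the updated base pointer, so they
  // need a three-entry value type list.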
3639  SDVTList VTs = Indexed ?
3640    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
3641  SDValue Ops[] = { Chain, Ptr, Offset };
3642  FoldingSetNodeID ID;
3643  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
3644  ID.AddInteger(EVT.getRawBits());
3645  ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, isVolatile, Alignment));
3646  void *IP = 0;
3647  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3648    return SDValue(E, 0);
3649  SDNode *N = NodeAllocator.Allocate<LoadSDNode>();
3650  new (N) LoadSDNode(Ops, dl, VTs, AM, ExtType, EVT, SV, SVOffset,
3651                     Alignment, isVolatile);
3652  CSEMap.InsertNode(N, IP);
3653  AllNodes.push_back(N);
3654  return SDValue(N, 0);
3655}
3656
3657SDValue SelectionDAG::getLoad(MVT VT, DebugLoc dl,
3658                              SDValue Chain, SDValue Ptr,
3659                              const Value *SV, int SVOffset,
3660                              bool isVolatile, unsigned Alignment) {
3661  SDValue Undef = getUNDEF(Ptr.getValueType());
3662  return getLoad(ISD::UNINDEXED, dl, ISD::NON_EXTLOAD, VT, Chain, Ptr, Undef,
3663                 SV, SVOffset, VT, isVolatile, Alignment);
3664}
3665
3666SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, MVT VT,
3667                                 SDValue Chain, SDValue Ptr,
3668                                 const Value *SV,
3669                                 int SVOffset, MVT EVT,
3670                                 bool isVolatile, unsigned Alignment) {
3671  SDValue Undef = getUNDEF(Ptr.getValueType());
3672  return getLoad(ISD::UNINDEXED, dl, ExtType, VT, Chain, Ptr, Undef,
3673                 SV, SVOffset, EVT, isVolatile, Alignment);
3674}
3675
3676SDValue
3677SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
3678                             SDValue Offset, ISD::MemIndexedMode AM) {
3679  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
3680  assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
3681         "Load is already a indexed load!");
3682  return getLoad(AM, dl, LD->getExtensionType(), OrigLoad.getValueType(),
3683                 LD->getChain(), Base, Offset, LD->getSrcValue(),
3684                 LD->getSrcValueOffset(), LD->getMemoryVT(),
3685                 LD->isVolatile(), LD->getAlignment());
3686}
3687
3688SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
3689                               SDValue Ptr, const Value *SV, int SVOffset,
3690                               bool isVolatile, unsigned Alignment) {
3691  MVT VT = Val.getValueType();
3692
3693  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3694    Alignment = getMVTAlignment(VT);
3695
3696  SDVTList VTs = getVTList(MVT::Other);
3697  SDValue Undef = getUNDEF(Ptr.getValueType());
3698  SDValue Ops[] = { Chain, Val, Ptr, Undef };
3699  FoldingSetNodeID ID;
3700  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
3701  ID.AddInteger(VT.getRawBits());
3702  ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED,
3703                                     isVolatile, Alignment));
3704  void *IP = 0;
3705  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3706    return SDValue(E, 0);
3707  SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
3708  new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, false,
3709                      VT, SV, SVOffset, Alignment, isVolatile);
3710  CSEMap.InsertNode(N, IP);
3711  AllNodes.push_back(N);
3712  return SDValue(N, 0);
3713}
3714
3715SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
3716                                    SDValue Ptr, const Value *SV,
3717                                    int SVOffset, MVT SVT,
3718                                    bool isVolatile, unsigned Alignment) {
3719  MVT VT = Val.getValueType();
3720
3721  if (VT == SVT)
3722    return getStore(Chain, dl, Val, Ptr, SV, SVOffset, isVolatile, Alignment);
3723
3724  assert(VT.bitsGT(SVT) && "Not a truncation?");
3725  assert(VT.isInteger() == SVT.isInteger() &&
3726         "Can't do FP-INT conversion!");
3727
3728  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3729    Alignment = getMVTAlignment(VT);
3730
3731  SDVTList VTs = getVTList(MVT::Other);
3732  SDValue Undef = getUNDEF(Ptr.getValueType());
3733  SDValue Ops[] = { Chain, Val, Ptr, Undef };
3734  FoldingSetNodeID ID;
3735  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
3736  ID.AddInteger(SVT.getRawBits());
3737  ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED,
3738                                     isVolatile, Alignment));
3739  void *IP = 0;
3740  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3741    return SDValue(E, 0);
3742  SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
3743  new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, true,
3744                      SVT, SV, SVOffset, Alignment, isVolatile);
3745  CSEMap.InsertNode(N, IP);
3746  AllNodes.push_back(N);
3747  return SDValue(N, 0);
3748}
3749
3750SDValue
3751SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
3752                              SDValue Offset, ISD::MemIndexedMode AM) {
3753  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
3754  assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
3755         "Store is already a indexed store!");
3756  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
3757  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
3758  FoldingSetNodeID ID;
3759  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
3760  ID.AddInteger(ST->getMemoryVT().getRawBits());
3761  ID.AddInteger(ST->getRawSubclassData());
3762  void *IP = 0;
3763  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3764    return SDValue(E, 0);
3765  SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
3766  new (N) StoreSDNode(Ops, dl, VTs, AM,
3767                      ST->isTruncatingStore(), ST->getMemoryVT(),
3768                      ST->getSrcValue(), ST->getSrcValueOffset(),
3769                      ST->getAlignment(), ST->isVolatile());
3770  CSEMap.InsertNode(N, IP);
3771  AllNodes.push_back(N);
3772  return SDValue(N, 0);
3773}
3774
3775SDValue SelectionDAG::getVAArg(MVT VT, DebugLoc dl,
3776                               SDValue Chain, SDValue Ptr,
3777                               SDValue SV) {
3778  SDValue Ops[] = { Chain, Ptr, SV };
3779  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 3);
3780}
3781
3782SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
3783                              const SDUse *Ops, unsigned NumOps) {
3784  switch (NumOps) {
3785  case 0: return getNode(Opcode, DL, VT);
3786  case 1: return getNode(Opcode, DL, VT, Ops[0]);
3787  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
3788  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
3789  default: break;
3790  }
3791
3792  // Copy from an SDUse array into an SDValue array for use with
3793  // the regular getNode logic.
3794  SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
3795  return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
3796}
3797
3798SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
3799                              const SDValue *Ops, unsigned NumOps) {
3800  switch (NumOps) {
3801  case 0: return getNode(Opcode, DL, VT);
3802  case 1: return getNode(Opcode, DL, VT, Ops[0]);
3803  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
3804  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
3805  default: break;
3806  }
3807
3808  switch (Opcode) {
3809  default: break;
3810  case ISD::SELECT_CC: {
3811    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
3812    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
3813           "LHS and RHS of condition must have same type!");
3814    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
3815           "True and False arms of SelectCC must have same type!");
3816    assert(Ops[2].getValueType() == VT &&
3817           "select_cc node must be of same type as true and false value!");
3818    break;
3819  }
3820  case ISD::BR_CC: {
3821    assert(NumOps == 5 && "BR_CC takes 5 operands!");
3822    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
3823           "LHS/RHS of comparison should match types!");
3824    break;
3825  }
3826  }
3827
3828  // Memoize nodes.
3829  SDNode *N;
3830  SDVTList VTs = getVTList(VT);
3831
3832  if (VT != MVT::Flag) {
3833    FoldingSetNodeID ID;
3834    AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
3835    void *IP = 0;
3836
3837    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3838      return SDValue(E, 0);
3839
3840    N = NodeAllocator.Allocate<SDNode>();
3841    new (N) SDNode(Opcode, DL, VTs, Ops, NumOps);
3842    CSEMap.InsertNode(N, IP);
3843  } else {
3844    N = NodeAllocator.Allocate<SDNode>();
3845    new (N) SDNode(Opcode, DL, VTs, Ops, NumOps);
3846  }
3847
3848  AllNodes.push_back(N);
3849#ifndef NDEBUG
3850  VerifyNode(N);
3851#endif
3852  return SDValue(N, 0);
3853}
3854
3855SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
3856                              const std::vector<MVT> &ResultTys,
3857                              const SDValue *Ops, unsigned NumOps) {
3858  return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
3859                 Ops, NumOps);
3860}
3861
3862SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
3863                              const MVT *VTs, unsigned NumVTs,
3864                              const SDValue *Ops, unsigned NumOps) {
3865  if (NumVTs == 1)
3866    return getNode(Opcode, DL, VTs[0], Ops, NumOps);
3867  return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
3868}
3869
3870SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
3871                              const SDValue *Ops, unsigned NumOps) {
3872  if (VTList.NumVTs == 1)
3873    return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
3874
3875  switch (Opcode) {
3876  // FIXME: figure out how to safely handle things like
3877  // int foo(int x) { return 1 << (x & 255); }
3878  // int bar() { return foo(256); }
3879#if 0
3880  case ISD::SRA_PARTS:
3881  case ISD::SRL_PARTS:
3882  case ISD::SHL_PARTS:
3883    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
3884        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
3885      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
3886    else if (N3.getOpcode() == ISD::AND)
3887      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
3888        // If the and is only masking out bits that cannot affect the shift,
3889        // eliminate the and.
3890        unsigned NumBits = VT.getSizeInBits()*2;
3891        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
3892          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
3893      }
3894    break;
3895#endif
3896  }
3897
3898  // Memoize the node unless it returns a flag.
3899  SDNode *N;
3900  if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
3901    FoldingSetNodeID ID;
3902    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
3903    void *IP = 0;
3904    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3905      return SDValue(E, 0);
3906    if (NumOps == 1) {
3907      N = NodeAllocator.Allocate<UnarySDNode>();
3908      new (N) UnarySDNode(Opcode, DL, VTList, Ops[0]);
3909    } else if (NumOps == 2) {
3910      N = NodeAllocator.Allocate<BinarySDNode>();
3911      new (N) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
3912    } else if (NumOps == 3) {
3913      N = NodeAllocator.Allocate<TernarySDNode>();
3914      new (N) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1], Ops[2]);
3915    } else {
3916      N = NodeAllocator.Allocate<SDNode>();
3917      new (N) SDNode(Opcode, DL, VTList, Ops, NumOps);
3918    }
3919    CSEMap.InsertNode(N, IP);
3920  } else {
3921    if (NumOps == 1) {
3922      N = NodeAllocator.Allocate<UnarySDNode>();
3923      new (N) UnarySDNode(Opcode, DL, VTList, Ops[0]);
3924    } else if (NumOps == 2) {
3925      N = NodeAllocator.Allocate<BinarySDNode>();
3926      new (N) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
3927    } else if (NumOps == 3) {
3928      N = NodeAllocator.Allocate<TernarySDNode>();
3929      new (N) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1], Ops[2]);
3930    } else {
3931      N = NodeAllocator.Allocate<SDNode>();
3932      new (N) SDNode(Opcode, DL, VTList, Ops, NumOps);
3933    }
3934  }
3935  AllNodes.push_back(N);
3936#ifndef NDEBUG
3937  VerifyNode(N);
3938#endif
3939  return SDValue(N, 0);
3940}
3941
3942SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList) {
3943  return getNode(Opcode, DL, VTList, 0, 0);
3944}
3945
3946SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
3947                              SDValue N1) {
3948  SDValue Ops[] = { N1 };
3949  return getNode(Opcode, DL, VTList, Ops, 1);
3950}
3951
3952SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
3953                              SDValue N1, SDValue N2) {
3954  SDValue Ops[] = { N1, N2 };
3955  return getNode(Opcode, DL, VTList, Ops, 2);
3956}
3957
3958SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
3959                              SDValue N1, SDValue N2, SDValue N3) {
3960  SDValue Ops[] = { N1, N2, N3 };
3961  return getNode(Opcode, DL, VTList, Ops, 3);
3962}
3963
3964SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
3965                              SDValue N1, SDValue N2, SDValue N3,
3966                              SDValue N4) {
3967  SDValue Ops[] = { N1, N2, N3, N4 };
3968  return getNode(Opcode, DL, VTList, Ops, 4);
3969}
3970
3971SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
3972                              SDValue N1, SDValue N2, SDValue N3,
3973                              SDValue N4, SDValue N5) {
3974  SDValue Ops[] = { N1, N2, N3, N4, N5 };
3975  return getNode(Opcode, DL, VTList, Ops, 5);
3976}
3977
3978SDVTList SelectionDAG::getVTList(MVT VT) {
3979  return makeVTList(SDNode::getValueTypeList(VT), 1);
3980}
3981
3982SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2) {
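  // Reuse a previously interned VT list if one with these types already
  // exists; the most recently created lists are searched first.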
3983  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
3984       E = VTList.rend(); I != E; ++I)
3985    if (I->NumVTs == 2 && I->VTs[0] == VT1 && I->VTs[1] == VT2)
3986      return *I;
3987
3988  MVT *Array = Allocator.Allocate<MVT>(2);
3989  Array[0] = VT1;
3990  Array[1] = VT2;
3991  SDVTList Result = makeVTList(Array, 2);
3992  VTList.push_back(Result);
3993  return Result;
3994}
3995
3996SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2, MVT VT3) {
3997  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
3998       E = VTList.rend(); I != E; ++I)
3999    if (I->NumVTs == 3 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4000                          I->VTs[2] == VT3)
4001      return *I;
4002
4003  MVT *Array = Allocator.Allocate<MVT>(3);
4004  Array[0] = VT1;
4005  Array[1] = VT2;
4006  Array[2] = VT3;
4007  SDVTList Result = makeVTList(Array, 3);
4008  VTList.push_back(Result);
4009  return Result;
4010}
4011
4012SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2, MVT VT3, MVT VT4) {
4013  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4014       E = VTList.rend(); I != E; ++I)
4015    if (I->NumVTs == 4 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4016                          I->VTs[2] == VT3 && I->VTs[3] == VT4)
4017      return *I;
4018
4019  MVT *Array = Allocator.Allocate<MVT>(4);
4020  Array[0] = VT1;
4021  Array[1] = VT2;
4022  Array[2] = VT3;
4023  Array[3] = VT4;
4024  SDVTList Result = makeVTList(Array, 4);
4025  VTList.push_back(Result);
4026  return Result;
4027}
4028
4029SDVTList SelectionDAG::getVTList(const MVT *VTs, unsigned NumVTs) {
4030  switch (NumVTs) {
4031    case 0: assert(0 && "Cannot have nodes without results!");
4032    case 1: return getVTList(VTs[0]);
4033    case 2: return getVTList(VTs[0], VTs[1]);
4034    case 3: return getVTList(VTs[0], VTs[1], VTs[2]);
4035    default: break;
4036  }
4037
4038  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4039       E = VTList.rend(); I != E; ++I) {
4040    if (I->NumVTs != NumVTs || VTs[0] != I->VTs[0] || VTs[1] != I->VTs[1])
4041      continue;
4042
4043    bool NoMatch = false;
4044    for (unsigned i = 2; i != NumVTs; ++i)
4045      if (VTs[i] != I->VTs[i]) {
4046        NoMatch = true;
4047        break;
4048      }
4049    if (!NoMatch)
4050      return *I;
4051  }
4052
4053  MVT *Array = Allocator.Allocate<MVT>(NumVTs);
4054  std::copy(VTs, VTs+NumVTs, Array);
4055  SDVTList Result = makeVTList(Array, NumVTs);
4056  VTList.push_back(Result);
4057  return Result;
4058}
4059
4060
4061/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
4062/// specified operands.  If the resultant node already exists in the DAG,
4063/// this does not modify the specified node; instead it returns the node that
4064/// already exists.  If the resultant node does not exist in the DAG, the
4065/// input node is returned.  As a degenerate case, if you specify the same
4066/// input operands as the node already has, the input node is returned.
4067SDValue SelectionDAG::UpdateNodeOperands(SDValue InN, SDValue Op) {
4068  SDNode *N = InN.getNode();
4069  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
4070
4071  // Check to see if there is no change.
4072  if (Op == N->getOperand(0)) return InN;
4073
4074  // See if the modified node already exists.
4075  void *InsertPos = 0;
4076  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
4077    return SDValue(Existing, InN.getResNo());
4078
4079  // Nope it doesn't.  Remove the node from its current place in the maps.
4080  if (InsertPos)
4081    if (!RemoveNodeFromCSEMaps(N))
4082      InsertPos = 0;
4083
4084  // Now we update the operands.
4085  N->OperandList[0].set(Op);
4086
4087  // If this gets put into a CSE map, add it.
4088  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4089  return InN;
4090}
4091
4092SDValue SelectionDAG::
4093UpdateNodeOperands(SDValue InN, SDValue Op1, SDValue Op2) {
4094  SDNode *N = InN.getNode();
4095  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
4096
4097  // Check to see if there is no change.
4098  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
4099    return InN;   // No operands changed, just return the input node.
4100
4101  // See if the modified node already exists.
4102  void *InsertPos = 0;
4103  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
4104    return SDValue(Existing, InN.getResNo());
4105
4106  // Nope it doesn't.  Remove the node from its current place in the maps.
4107  if (InsertPos)
4108    if (!RemoveNodeFromCSEMaps(N))
4109      InsertPos = 0;
4110
4111  // Now we update the operands.
4112  if (N->OperandList[0] != Op1)
4113    N->OperandList[0].set(Op1);
4114  if (N->OperandList[1] != Op2)
4115    N->OperandList[1].set(Op2);
4116
4117  // If this gets put into a CSE map, add it.
4118  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4119  return InN;
4120}
4121
4122SDValue SelectionDAG::
4123UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, SDValue Op3) {
4124  SDValue Ops[] = { Op1, Op2, Op3 };
4125  return UpdateNodeOperands(N, Ops, 3);
4126}
4127
4128SDValue SelectionDAG::
4129UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
4130                   SDValue Op3, SDValue Op4) {
4131  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
4132  return UpdateNodeOperands(N, Ops, 4);
4133}
4134
4135SDValue SelectionDAG::
4136UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
4137                   SDValue Op3, SDValue Op4, SDValue Op5) {
4138  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
4139  return UpdateNodeOperands(N, Ops, 5);
4140}
4141
4142SDValue SelectionDAG::
4143UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
4144  SDNode *N = InN.getNode();
4145  assert(N->getNumOperands() == NumOps &&
4146         "Update with wrong number of operands");
4147
4148  // Check to see if there is no change.
4149  bool AnyChange = false;
4150  for (unsigned i = 0; i != NumOps; ++i) {
4151    if (Ops[i] != N->getOperand(i)) {
4152      AnyChange = true;
4153      break;
4154    }
4155  }
4156
4157  // No operands changed, just return the input node.
4158  if (!AnyChange) return InN;
4159
4160  // See if the modified node already exists.
4161  void *InsertPos = 0;
4162  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
4163    return SDValue(Existing, InN.getResNo());
4164
4165  // Nope it doesn't.  Remove the node from its current place in the maps.
4166  if (InsertPos)
4167    if (!RemoveNodeFromCSEMaps(N))
4168      InsertPos = 0;
4169
4170  // Now we update the operands.
4171  for (unsigned i = 0; i != NumOps; ++i)
4172    if (N->OperandList[i] != Ops[i])
4173      N->OperandList[i].set(Ops[i]);
4174
4175  // If this gets put into a CSE map, add it.
4176  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4177  return InN;
4178}
4179
4180/// DropOperands - Release the operands and set this node to have
4181/// zero operands.
4182void SDNode::DropOperands() {
4183  // Unlike the code in MorphNodeTo that does this, we don't need to
4184  // watch for dead nodes here.
4185  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
4186    SDUse &Use = *I++;
4187    Use.set(SDValue());
4188  }
4189}
4190
4191/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
4192/// machine opcode.
4193///
4194SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4195                                   MVT VT) {
4196  SDVTList VTs = getVTList(VT);
4197  return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
4198}
4199
4200SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4201                                   MVT VT, SDValue Op1) {
4202  SDVTList VTs = getVTList(VT);
4203  SDValue Ops[] = { Op1 };
4204  return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
4205}
4206
4207SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4208                                   MVT VT, SDValue Op1,
4209                                   SDValue Op2) {
4210  SDVTList VTs = getVTList(VT);
4211  SDValue Ops[] = { Op1, Op2 };
4212  return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
4213}
4214
4215SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4216                                   MVT VT, SDValue Op1,
4217                                   SDValue Op2, SDValue Op3) {
4218  SDVTList VTs = getVTList(VT);
4219  SDValue Ops[] = { Op1, Op2, Op3 };
4220  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
4221}
4222
4223SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4224                                   MVT VT, const SDValue *Ops,
4225                                   unsigned NumOps) {
4226  SDVTList VTs = getVTList(VT);
4227  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
4228}
4229
4230SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4231                                   MVT VT1, MVT VT2, const SDValue *Ops,
4232                                   unsigned NumOps) {
4233  SDVTList VTs = getVTList(VT1, VT2);
4234  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
4235}
4236
4237SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4238                                   MVT VT1, MVT VT2) {
4239  SDVTList VTs = getVTList(VT1, VT2);
4240  return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
4241}
4242
4243SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4244                                   MVT VT1, MVT VT2, MVT VT3,
4245                                   const SDValue *Ops, unsigned NumOps) {
4246  SDVTList VTs = getVTList(VT1, VT2, VT3);
4247  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
4248}
4249
4250SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4251                                   MVT VT1, MVT VT2, MVT VT3, MVT VT4,
4252                                   const SDValue *Ops, unsigned NumOps) {
4253  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
4254  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
4255}
4256
4257SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4258                                   MVT VT1, MVT VT2,
4259                                   SDValue Op1) {
4260  SDVTList VTs = getVTList(VT1, VT2);
4261  SDValue Ops[] = { Op1 };
4262  return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
4263}
4264
4265SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4266                                   MVT VT1, MVT VT2,
4267                                   SDValue Op1, SDValue Op2) {
4268  SDVTList VTs = getVTList(VT1, VT2);
4269  SDValue Ops[] = { Op1, Op2 };
4270  return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
4271}
4272
4273SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4274                                   MVT VT1, MVT VT2,
4275                                   SDValue Op1, SDValue Op2,
4276                                   SDValue Op3) {
4277  SDVTList VTs = getVTList(VT1, VT2);
4278  SDValue Ops[] = { Op1, Op2, Op3 };
4279  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
4280}
4281
4282SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4283                                   MVT VT1, MVT VT2, MVT VT3,
4284                                   SDValue Op1, SDValue Op2,
4285                                   SDValue Op3) {
4286  SDVTList VTs = getVTList(VT1, VT2, VT3);
4287  SDValue Ops[] = { Op1, Op2, Op3 };
4288  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
4289}
4290
4291SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4292                                   SDVTList VTs, const SDValue *Ops,
4293                                   unsigned NumOps) {
4294  return MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
4295}
4296
4297SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4298                                  MVT VT) {
4299  SDVTList VTs = getVTList(VT);
4300  return MorphNodeTo(N, Opc, VTs, 0, 0);
4301}
4302
4303SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4304                                  MVT VT, SDValue Op1) {
4305  SDVTList VTs = getVTList(VT);
4306  SDValue Ops[] = { Op1 };
4307  return MorphNodeTo(N, Opc, VTs, Ops, 1);
4308}
4309
4310SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4311                                  MVT VT, SDValue Op1,
4312                                  SDValue Op2) {
4313  SDVTList VTs = getVTList(VT);
4314  SDValue Ops[] = { Op1, Op2 };
4315  return MorphNodeTo(N, Opc, VTs, Ops, 2);
4316}
4317
4318SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4319                                  MVT VT, SDValue Op1,
4320                                  SDValue Op2, SDValue Op3) {
4321  SDVTList VTs = getVTList(VT);
4322  SDValue Ops[] = { Op1, Op2, Op3 };
4323  return MorphNodeTo(N, Opc, VTs, Ops, 3);
4324}
4325
4326SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4327                                  MVT VT, const SDValue *Ops,
4328                                  unsigned NumOps) {
4329  SDVTList VTs = getVTList(VT);
4330  return MorphNodeTo(N, Opc, VTs, Ops, NumOps);
4331}
4332
4333SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4334                                  MVT VT1, MVT VT2, const SDValue *Ops,
4335                                  unsigned NumOps) {
4336  SDVTList VTs = getVTList(VT1, VT2);
4337  return MorphNodeTo(N, Opc, VTs, Ops, NumOps);
4338}
4339
4340SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4341                                  MVT VT1, MVT VT2) {
4342  SDVTList VTs = getVTList(VT1, VT2);
4343  return MorphNodeTo(N, Opc, VTs, (SDValue *)0, 0);
4344}
4345
4346SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4347                                  MVT VT1, MVT VT2, MVT VT3,
4348                                  const SDValue *Ops, unsigned NumOps) {
4349  SDVTList VTs = getVTList(VT1, VT2, VT3);
4350  return MorphNodeTo(N, Opc, VTs, Ops, NumOps);
4351}
4352
4353SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4354                                  MVT VT1, MVT VT2,
4355                                  SDValue Op1) {
4356  SDVTList VTs = getVTList(VT1, VT2);
4357  SDValue Ops[] = { Op1 };
4358  return MorphNodeTo(N, Opc, VTs, Ops, 1);
4359}
4360
4361SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4362                                  MVT VT1, MVT VT2,
4363                                  SDValue Op1, SDValue Op2) {
4364  SDVTList VTs = getVTList(VT1, VT2);
4365  SDValue Ops[] = { Op1, Op2 };
4366  return MorphNodeTo(N, Opc, VTs, Ops, 2);
4367}
4368
4369SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4370                                  MVT VT1, MVT VT2,
4371                                  SDValue Op1, SDValue Op2,
4372                                  SDValue Op3) {
4373  SDVTList VTs = getVTList(VT1, VT2);
4374  SDValue Ops[] = { Op1, Op2, Op3 };
4375  return MorphNodeTo(N, Opc, VTs, Ops, 3);
4376}
4377
4378/// MorphNodeTo - These *mutate* the specified node to have the specified
4379/// return type, opcode, and operands.
4380///
4381/// Note that MorphNodeTo returns the resultant node.  If there is already a
4382/// node of the specified opcode and operands, it returns that node instead of
4383/// the current one.  Note that the DebugLoc need not be the same.
4384///
4385/// Using MorphNodeTo is faster than creating a new node and swapping it in
4386/// with ReplaceAllUsesWith both because it often avoids allocating a new
4387/// node, and because it doesn't require CSE recalculation for any of
4388/// the node's users.
4389///
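/// A minimal sketch of in-place morphing (illustrative only; 'DAG', 'N',
/// 'LHS' and 'RHS' are assumed to be in scope and type-correct):
///   SDNode *Res = DAG.MorphNodeTo(N, ISD::SUB, MVT::i32, LHS, RHS);
///   // If an identical ISD::SUB node already existed, Res is that node;
///   // callers must use Res rather than assuming N itself was reused.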
4390SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4391                                  SDVTList VTs, const SDValue *Ops,
4392                                  unsigned NumOps) {
4393  // If an identical node already exists, use it.
4394  void *IP = 0;
4395  if (VTs.VTs[VTs.NumVTs-1] != MVT::Flag) {
4396    FoldingSetNodeID ID;
4397    AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
4398    if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
4399      return ON;
4400  }
4401
4402  if (!RemoveNodeFromCSEMaps(N))
4403    IP = 0;
4404
4405  // Start the morphing.
4406  N->NodeType = Opc;
4407  N->ValueList = VTs.VTs;
4408  N->NumValues = VTs.NumVTs;
4409
4410  // Clear the operands list, updating used nodes to remove this from their
4411  // use list.  Keep track of any operands that become dead as a result.
4412  SmallPtrSet<SDNode*, 16> DeadNodeSet;
4413  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
4414    SDUse &Use = *I++;
4415    SDNode *Used = Use.getNode();
4416    Use.set(SDValue());
4417    if (Used->use_empty())
4418      DeadNodeSet.insert(Used);
4419  }
4420
4421  // If NumOps is larger than the # of operands we currently have, reallocate
4422  // the operand list.
4423  if (NumOps > N->NumOperands) {
4424    if (N->OperandsNeedDelete)
4425      delete[] N->OperandList;
4426
4427    if (N->isMachineOpcode()) {
4428      // We're creating a final node that will live unmorphed for the
4429      // remainder of the current SelectionDAG iteration, so we can allocate
4430      // the operands directly out of a pool with no recycling metadata.
4431      N->OperandList = OperandAllocator.Allocate<SDUse>(NumOps);
4432      N->OperandsNeedDelete = false;
4433    } else {
4434      N->OperandList = new SDUse[NumOps];
4435      N->OperandsNeedDelete = true;
4436    }
4437  }
4438
4439  // Assign the new operands.
4440  N->NumOperands = NumOps;
4441  for (unsigned i = 0, e = NumOps; i != e; ++i) {
4442    N->OperandList[i].setUser(N);
4443    N->OperandList[i].setInitial(Ops[i]);
4444  }
4445
4446  // Delete any nodes that are still dead after adding the uses for the
4447  // new operands.
4448  SmallVector<SDNode *, 16> DeadNodes;
4449  for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
4450       E = DeadNodeSet.end(); I != E; ++I)
4451    if ((*I)->use_empty())
4452      DeadNodes.push_back(*I);
4453  RemoveDeadNodes(DeadNodes);
4454
4455  if (IP)
4456    CSEMap.InsertNode(N, IP);   // Memoize the new node.
4457  return N;
4458}
4459
4460
4461/// getTargetNode - These are used for target selectors to create a new node
4462/// with specified return type(s), target opcode, and operands.
4463///
4464/// Note that getTargetNode returns the resultant node.  If there is already a
4465/// node of the specified opcode and operands, it returns that node instead of
4466/// the current one.
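/// Like SelectNodeTo, these encode the target opcode as ~Opcode before
/// handing it to getNode.  A hedged usage sketch (illustrative only; 'DAG',
/// 'dl', 'MyTargetOpc' and 'Src' are assumptions, not taken from this file):
///   SDNode *Copy = DAG.getTargetNode(MyTargetOpc, dl, MVT::i32, Src);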
4467SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT) {
4468  return getNode(~Opcode, dl, VT).getNode();
4469}
4470
4471SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
4472                                    SDValue Op1) {
4473  return getNode(~Opcode, dl, VT, Op1).getNode();
4474}
4475
4476SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
4477                                    SDValue Op1, SDValue Op2) {
4478  return getNode(~Opcode, dl, VT, Op1, Op2).getNode();
4479}
4480
4481SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
4482                                    SDValue Op1, SDValue Op2,
4483                                    SDValue Op3) {
4484  return getNode(~Opcode, dl, VT, Op1, Op2, Op3).getNode();
4485}
4486
4487SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
4488                                    const SDValue *Ops, unsigned NumOps) {
4489  return getNode(~Opcode, dl, VT, Ops, NumOps).getNode();
4490}
4491
4492SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
4493                                    MVT VT1, MVT VT2) {
4494  SDVTList VTs = getVTList(VT1, VT2);
4495  SDValue Op;
4496  return getNode(~Opcode, dl, VTs, &Op, 0).getNode();
4497}
4498
4499SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
4500                                    MVT VT2, SDValue Op1) {
4501  SDVTList VTs = getVTList(VT1, VT2);
4502  return getNode(~Opcode, dl, VTs, &Op1, 1).getNode();
4503}
4504
4505SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
4506                                    MVT VT2, SDValue Op1,
4507                                    SDValue Op2) {
4508  SDVTList VTs = getVTList(VT1, VT2);
4509  SDValue Ops[] = { Op1, Op2 };
4510  return getNode(~Opcode, dl, VTs, Ops, 2).getNode();
4511}
4512
4513SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
4514                                    MVT VT2, SDValue Op1,
4515                                    SDValue Op2, SDValue Op3) {
4516  SDVTList VTs = getVTList(VT1, VT2);
4517  SDValue Ops[] = { Op1, Op2, Op3 };
4518  return getNode(~Opcode, dl, VTs, Ops, 3).getNode();
4519}
4520
4521SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
4522                                    MVT VT1, MVT VT2,
4523                                    const SDValue *Ops, unsigned NumOps) {
4524  SDVTList VTs = getVTList(VT1, VT2);
4525  return getNode(~Opcode, dl, VTs, Ops, NumOps).getNode();
4526}
4527
4528SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
4529                                    MVT VT1, MVT VT2, MVT VT3,
4530                                    SDValue Op1, SDValue Op2) {
4531  SDVTList VTs = getVTList(VT1, VT2, VT3);
4532  SDValue Ops[] = { Op1, Op2 };
4533  return getNode(~Opcode, dl, VTs, Ops, 2).getNode();
4534}
4535
4536SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
4537                                    MVT VT1, MVT VT2, MVT VT3,
4538                                    SDValue Op1, SDValue Op2,
4539                                    SDValue Op3) {
4540  SDVTList VTs = getVTList(VT1, VT2, VT3);
4541  SDValue Ops[] = { Op1, Op2, Op3 };
4542  return getNode(~Opcode, dl, VTs, Ops, 3).getNode();
4543}
4544
4545SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
4546                                    MVT VT1, MVT VT2, MVT VT3,
4547                                    const SDValue *Ops, unsigned NumOps) {
4548  SDVTList VTs = getVTList(VT1, VT2, VT3);
4549  return getNode(~Opcode, dl, VTs, Ops, NumOps).getNode();
4550}
4551
4552SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
4553                                    MVT VT2, MVT VT3, MVT VT4,
4554                                    const SDValue *Ops, unsigned NumOps) {
4555  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
4556  return getNode(~Opcode, dl, VTs, Ops, NumOps).getNode();
4557}
4558
4559SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
4560                                    const std::vector<MVT> &ResultTys,
4561                                    const SDValue *Ops, unsigned NumOps) {
4562  return getNode(~Opcode, dl, ResultTys, Ops, NumOps).getNode();
4563}
4564
4565/// getNodeIfExists - Get the specified node if it's already available, or
4566/// else return NULL.
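/// Note that nodes whose last result type is MVT::Flag are never entered in
/// the CSE maps, so this always returns NULL for them.  A minimal sketch
/// (illustrative only; 'DAG', 'LHS' and 'RHS' are assumed to be in scope):
///   SDValue Ops[] = { LHS, RHS };
///   SDNode *Existing =
///     DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(MVT::i32), Ops, 2);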
4567SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
4568                                      const SDValue *Ops, unsigned NumOps) {
4569  if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
4570    FoldingSetNodeID ID;
4571    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4572    void *IP = 0;
4573    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4574      return E;
4575  }
4576  return NULL;
4577}
4578
4579/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
4580/// This can cause recursive merging of nodes in the DAG.
4581///
4582/// This version assumes From has a single result value.
4583///
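/// A typical use is swapping a newly built value in for an old one
/// (illustrative sketch; 'DAG', 'OldVal' and 'NewVal' are assumed, and
/// OldVal must be the only result of its node, per the assert below):
///   DAG.ReplaceAllUsesWith(OldVal, NewVal, 0);
///   // Users of OldVal are rewritten in place; any user that becomes
///   // identical to an existing node is merged with it via the CSE maps.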
4584void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To,
4585                                      DAGUpdateListener *UpdateListener) {
4586  SDNode *From = FromN.getNode();
4587  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
4588         "Cannot replace with this method!");
4589  assert(From != To.getNode() && "Cannot replace a value with itself");
4590
4591  // Iterate over all the existing uses of From. New uses will be added
4592  // to the beginning of the use list, which we avoid visiting.
4593  // This specifically avoids visiting uses of From that arise while the
4594  // replacement is happening, because any such uses would be the result
4595  // of CSE: If an existing node looks like From after one of its operands
4596  // is replaced by To, we don't want to replace all of its uses with To
4597  // as well. See PR3018 for more info.
4598  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
4599  while (UI != UE) {
4600    SDNode *User = *UI;
4601
4602    // This node is about to morph, remove its old self from the CSE maps.
4603    RemoveNodeFromCSEMaps(User);
4604
4605    // A user can appear in a use list multiple times, and when this
4606    // happens the uses are usually next to each other in the list.
4607    // To help reduce the number of CSE recomputations, process all
4608    // the uses of this user that we can find this way.
4609    do {
4610      SDUse &Use = UI.getUse();
4611      ++UI;
4612      Use.set(To);
4613    } while (UI != UE && *UI == User);
4614
4615    // Now that we have modified User, add it back to the CSE maps.  If it
4616    // already exists there, recursively merge the results together.
4617    AddModifiedNodeToCSEMaps(User, UpdateListener);
4618  }
4619}
4620
4621/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
4622/// This can cause recursive merging of nodes in the DAG.
4623///
4624/// This version assumes that for each value of From, there is a
4625/// corresponding value in To in the same position with the same type.
4626///
4627void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To,
4628                                      DAGUpdateListener *UpdateListener) {
4629#ifndef NDEBUG
4630  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
4631    assert((!From->hasAnyUseOfValue(i) ||
4632            From->getValueType(i) == To->getValueType(i)) &&
4633           "Cannot use this version of ReplaceAllUsesWith!");
4634#endif
4635
4636  // Handle the trivial case.
4637  if (From == To)
4638    return;
4639
4640  // Iterate over just the existing users of From. See the comments in
4641  // the ReplaceAllUsesWith above.
4642  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
4643  while (UI != UE) {
4644    SDNode *User = *UI;
4645
4646    // This node is about to morph, remove its old self from the CSE maps.
4647    RemoveNodeFromCSEMaps(User);
4648
4649    // A user can appear in a use list multiple times, and when this
4650    // happens the uses are usually next to each other in the list.
4651    // To help reduce the number of CSE recomputations, process all
4652    // the uses of this user that we can find this way.
4653    do {
4654      SDUse &Use = UI.getUse();
4655      ++UI;
4656      Use.setNode(To);
4657    } while (UI != UE && *UI == User);
4658
4659    // Now that we have modified User, add it back to the CSE maps.  If it
4660    // already exists there, recursively merge the results together.
4661    AddModifiedNodeToCSEMaps(User, UpdateListener);
4662  }
4663}
4664
4665/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
4666/// This can cause recursive merging of nodes in the DAG.
4667///
4668/// This version can replace From with any result values.  To must match the
4669/// number and types of values returned by From.
4670void SelectionDAG::ReplaceAllUsesWith(SDNode *From,
4671                                      const SDValue *To,
4672                                      DAGUpdateListener *UpdateListener) {
4673  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
4674    return ReplaceAllUsesWith(SDValue(From, 0), To[0], UpdateListener);
4675
4676  // Iterate over just the existing users of From. See the comments in
4677  // the ReplaceAllUsesWith above.
4678  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
4679  while (UI != UE) {
4680    SDNode *User = *UI;
4681
4682    // This node is about to morph, remove its old self from the CSE maps.
4683    RemoveNodeFromCSEMaps(User);
4684
4685    // A user can appear in a use list multiple times, and when this
4686    // happens the uses are usually next to each other in the list.
4687    // To help reduce the number of CSE recomputations, process all
4688    // the uses of this user that we can find this way.
4689    do {
4690      SDUse &Use = UI.getUse();
4691      const SDValue &ToOp = To[Use.getResNo()];
4692      ++UI;
4693      Use.set(ToOp);
4694    } while (UI != UE && *UI == User);
4695
4696    // Now that we have modified User, add it back to the CSE maps.  If it
4697    // already exists there, recursively merge the results together.
4698    AddModifiedNodeToCSEMaps(User, UpdateListener);
4699  }
4700}
4701
4702/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
4703/// uses of other values produced by From.getNode() alone.  The
4704/// UpdateListener is handled the same way as for ReplaceAllUsesWith.
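/// For example, a load produces both a loaded value (result 0) and a chain
/// (result 1); this call can redirect just the value uses while leaving the
/// chain uses untouched (an illustrative sketch; 'DAG', 'Load' and 'NewVal'
/// are assumed to be in scope):
///   DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), NewVal, 0);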
4705void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
4706                                             DAGUpdateListener *UpdateListener){
4707  // Handle the really simple, really trivial case efficiently.
4708  if (From == To) return;
4709
4710  // Handle the simple, trivial, case efficiently.
4711  if (From.getNode()->getNumValues() == 1) {
4712    ReplaceAllUsesWith(From, To, UpdateListener);
4713    return;
4714  }
4715
4716  // Iterate over just the existing users of From. See the comments in
4717  // the ReplaceAllUsesWith above.
4718  SDNode::use_iterator UI = From.getNode()->use_begin(),
4719                       UE = From.getNode()->use_end();
4720  while (UI != UE) {
4721    SDNode *User = *UI;
4722    bool UserRemovedFromCSEMaps = false;
4723
4724    // A user can appear in a use list multiple times, and when this
4725    // happens the uses are usually next to each other in the list.
4726    // To help reduce the number of CSE recomputations, process all
4727    // the uses of this user that we can find this way.
4728    do {
4729      SDUse &Use = UI.getUse();
4730
4731      // Skip uses of different values from the same node.
4732      if (Use.getResNo() != From.getResNo()) {
4733        ++UI;
4734        continue;
4735      }
4736
4737      // If this node hasn't been modified yet, it's still in the CSE maps,
4738      // so remove its old self from the CSE maps.
4739      if (!UserRemovedFromCSEMaps) {
4740        RemoveNodeFromCSEMaps(User);
4741        UserRemovedFromCSEMaps = true;
4742      }
4743
4744      ++UI;
4745      Use.set(To);
4746    } while (UI != UE && *UI == User);
4747
4748    // We are iterating over all uses of the From node, so if a use
4749    // doesn't use the specific value, no changes are made.
4750    if (!UserRemovedFromCSEMaps)
4751      continue;
4752
4753    // Now that we have modified User, add it back to the CSE maps.  If it
4754    // already exists there, recursively merge the results together.
4755    AddModifiedNodeToCSEMaps(User, UpdateListener);
4756  }
4757}
4758
4759namespace {
4760  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
4761  /// to record information about a use.
4762  struct UseMemo {
4763    SDNode *User;
4764    unsigned Index;
4765    SDUse *Use;
4766  };
4767
4768  /// operator< - Sort Memos by User.
4769  bool operator<(const UseMemo &L, const UseMemo &R) {
4770    return (intptr_t)L.User < (intptr_t)R.User;
4771  }
4772}
4773
4774/// ReplaceAllUsesOfValuesWith - Replace any uses of the From values with the
4775/// corresponding To values, leaving uses of other results of the From nodes
4776/// alone.  The same value may appear in both the From and To lists.  The
4777/// UpdateListener is handled the same way as for ReplaceAllUsesWith.
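/// The implementation records every relevant use in a UseMemo up front and
/// sorts the records by user, so each user is rewritten and re-CSE'd only
/// once and uses introduced during the replacement are never visited.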
4778void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
4779                                              const SDValue *To,
4780                                              unsigned Num,
4781                                              DAGUpdateListener *UpdateListener){
4782  // Handle the simple, trivial case efficiently.
4783  if (Num == 1)
4784    return ReplaceAllUsesOfValueWith(*From, *To, UpdateListener);
4785
4786  // Record all of the existing uses up front.  This keeps new uses that
4787  // are introduced during the replacement process from being visited by
4788  // the loop below.
4789  SmallVector<UseMemo, 4> Uses;
4790  for (unsigned i = 0; i != Num; ++i) {
4791    unsigned FromResNo = From[i].getResNo();
4792    SDNode *FromNode = From[i].getNode();
4793    for (SDNode::use_iterator UI = FromNode->use_begin(),
4794         E = FromNode->use_end(); UI != E; ++UI) {
4795      SDUse &Use = UI.getUse();
4796      if (Use.getResNo() == FromResNo) {
4797        UseMemo Memo = { *UI, i, &Use };
4798        Uses.push_back(Memo);
4799      }
4800    }
4801  }
4802
4803  // Sort the uses, so that all the uses from a given User are together.
4804  std::sort(Uses.begin(), Uses.end());
4805
4806  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
4807       UseIndex != UseIndexEnd; ) {
4808    // We know that this user uses some value of From.  If it is the right
4809    // value, update it.
4810    SDNode *User = Uses[UseIndex].User;
4811
4812    // This node is about to morph, remove its old self from the CSE maps.
4813    RemoveNodeFromCSEMaps(User);
4814
4815    // The Uses array is sorted, so all the uses for a given User
4816    // are next to each other in the list.
4817    // To help reduce the number of CSE recomputations, process all
4818    // the uses of this user that we can find this way.
4819    do {
4820      unsigned i = Uses[UseIndex].Index;
4821      SDUse &Use = *Uses[UseIndex].Use;
4822      ++UseIndex;
4823
4824      Use.set(To[i]);
4825    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
4826
4827    // Now that we have modified User, add it back to the CSE maps.  If it
4828    // already exists there, recursively merge the results together.
4829    AddModifiedNodeToCSEMaps(User, UpdateListener);
4830  }
4831}
4832
4833/// AssignTopologicalOrder - Assign a unique node id to each node in the DAG
4834/// based on its topological order.  It returns the number of nodes, which is
4835/// one greater than the largest id assigned.
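/// This is effectively a Kahn-style topological sort: each node's Id field
/// temporarily holds its count of not-yet-sorted operands, and a node is
/// spliced into the sorted prefix of the node list once that count drops
/// to zero.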
4836unsigned SelectionDAG::AssignTopologicalOrder() {
4837
4838  unsigned DAGSize = 0;
4839
4840  // SortedPos tracks the progress of the algorithm. Nodes before it are
4841  // sorted, nodes after it are unsorted. When the algorithm completes
4842  // it is at the end of the list.
4843  allnodes_iterator SortedPos = allnodes_begin();
4844
4845  // Visit all the nodes. Move nodes with no operands to the front of
4846  // the list immediately. Annotate nodes that do have operands with their
4847  // operand count. Before we do this, the Node Id fields of the nodes
4848  // may contain arbitrary values. After, the Node Id fields for nodes
4849  // before SortedPos will contain the topological sort index, and the
4850  // Node Id fields for nodes at SortedPos and after will contain the
4851  // count of outstanding operands.
4852  for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
4853    SDNode *N = I++;
4854    unsigned Degree = N->getNumOperands();
4855    if (Degree == 0) {
4856      // A node with no operands is trivially sorted; move it to the front.
4857      N->setNodeId(DAGSize++);
4858      allnodes_iterator Q = N;
4859      if (Q != SortedPos)
4860        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
4861      ++SortedPos;
4862    } else {
4863      // Temporarily use the Node Id as scratch space for the degree count.
4864      N->setNodeId(Degree);
4865    }
4866  }
4867
4868  // Visit all the nodes. As we iterate, move nodes into sorted order,
4869  // such that by the time the end is reached all nodes will be sorted.
4870  for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
4871    SDNode *N = I;
4872    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
4873         UI != UE; ++UI) {
4874      SDNode *P = *UI;
4875      unsigned Degree = P->getNodeId();
4876      --Degree;
4877      if (Degree == 0) {
4878        // All of P's operands are sorted, so P may be sorted now.
4879        P->setNodeId(DAGSize++);
4880        if (P != SortedPos)
4881          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
4882        ++SortedPos;
4883      } else {
4884        // Update P's outstanding operand count.
4885        P->setNodeId(Degree);
4886      }
4887    }
4888  }
4889
4890  assert(SortedPos == AllNodes.end() &&
4891         "Topological sort incomplete!");
4892  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
4893         "First node in topological sort is not the entry token!");
4894  assert(AllNodes.front().getNodeId() == 0 &&
4895         "First node in topological sort has non-zero id!");
4896  assert(AllNodes.front().getNumOperands() == 0 &&
4897         "First node in topological sort has operands!");
4898  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
4899         "Last node in topological sort has unexpected id!");
4900  assert(AllNodes.back().use_empty() &&
4901         "Last node in topological sort has users!");
4902  assert(DAGSize == allnodes_size() && "Node count mismatch!");
4903  return DAGSize;
4904}
4905
4906
4907
4908//===----------------------------------------------------------------------===//
4909//                              SDNode Class
4910//===----------------------------------------------------------------------===//
4911
4912HandleSDNode::~HandleSDNode() {
4913  DropOperands();
4914}
4915
4916GlobalAddressSDNode::GlobalAddressSDNode(bool isTarget, const GlobalValue *GA,
4917                                         MVT VT, int64_t o)
4918  : SDNode(isa<GlobalVariable>(GA) &&
4919           cast<GlobalVariable>(GA)->isThreadLocal() ?
4920           // Thread Local
4921           (isTarget ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress) :
4922           // Non Thread Local
4923           (isTarget ? ISD::TargetGlobalAddress : ISD::GlobalAddress),
4924           DebugLoc::getUnknownLoc(), getSDVTList(VT)), Offset(o) {
4925  TheGlobal = const_cast<GlobalValue*>(GA);
4926}
4927
4928MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, MVT memvt,
4929                     const Value *srcValue, int SVO,
4930                     unsigned alignment, bool vol)
4931 : SDNode(Opc, dl, VTs), MemoryVT(memvt), SrcValue(srcValue), SVOffset(SVO) {
4932  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, vol, alignment);
4933  assert(isPowerOf2_32(alignment) && "Alignment is not a power of 2!");
4934  assert(getAlignment() == alignment && "Alignment representation error!");
4935  assert(isVolatile() == vol && "Volatile representation error!");
4936}
4937
4938MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
4939                     const SDValue *Ops,
4940                     unsigned NumOps, MVT memvt, const Value *srcValue,
4941                     int SVO, unsigned alignment, bool vol)
4942   : SDNode(Opc, dl, VTs, Ops, NumOps),
4943     MemoryVT(memvt), SrcValue(srcValue), SVOffset(SVO) {
4944  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, vol, alignment);
4945  assert(isPowerOf2_32(alignment) && "Alignment is not a power of 2!");
4946  assert(getAlignment() == alignment && "Alignment representation error!");
4947  assert(isVolatile() == vol && "Volatile representation error!");
4948}
4949
4950/// getMemOperand - Return a MachineMemOperand object describing the memory
4951/// reference performed by this memory-referencing node.
4952MachineMemOperand MemSDNode::getMemOperand() const {
4953  int Flags = 0;
4954  if (isa<LoadSDNode>(this))
4955    Flags = MachineMemOperand::MOLoad;
4956  else if (isa<StoreSDNode>(this))
4957    Flags = MachineMemOperand::MOStore;
4958  else if (isa<AtomicSDNode>(this)) {
4959    Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
4960  }
4961  else {
4962    const MemIntrinsicSDNode* MemIntrinNode = dyn_cast<MemIntrinsicSDNode>(this);
4963    assert(MemIntrinNode && "Unknown MemSDNode opcode!");
4964    if (MemIntrinNode->readMem()) Flags |= MachineMemOperand::MOLoad;
4965    if (MemIntrinNode->writeMem()) Flags |= MachineMemOperand::MOStore;
4966  }
4967
4968  int Size = (getMemoryVT().getSizeInBits() + 7) >> 3;
4969  if (isVolatile()) Flags |= MachineMemOperand::MOVolatile;
4970
4971  // Check if the memory reference references a frame index.
4972  const FrameIndexSDNode *FI =
4973    dyn_cast<const FrameIndexSDNode>(getBasePtr().getNode());
4974  if (!getSrcValue() && FI)
4975    return MachineMemOperand(PseudoSourceValue::getFixedStack(FI->getIndex()),
4976                             Flags, 0, Size, getAlignment());
4977  else
4978    return MachineMemOperand(getSrcValue(), Flags, getSrcValueOffset(),
4979                             Size, getAlignment());
4980}
4981
4982/// Profile - Gather unique data for the node.
4983///
4984void SDNode::Profile(FoldingSetNodeID &ID) const {
4985  AddNodeIDNode(ID, this);
4986}
4987
4988static ManagedStatic<std::set<MVT, MVT::compareRawBits> > EVTs;
4989static MVT VTs[MVT::LAST_VALUETYPE];
4990static ManagedStatic<sys::SmartMutex<true> > VTMutex;
4991
4992/// getValueTypeList - Return a pointer to the specified value type.
4993///
4994const MVT *SDNode::getValueTypeList(MVT VT) {
4995  sys::SmartScopedLock<true> Lock(&*VTMutex);
4996  if (VT.isExtended()) {
4997    return &(*EVTs->insert(VT).first);
4998  } else {
4999    VTs[VT.getSimpleVT()] = VT;
5000    return &VTs[VT.getSimpleVT()];
5001  }
5002}
5003
5004/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
5005/// indicated value.  This method ignores uses of other values defined by this
5006/// operation.
5007bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
5008  assert(Value < getNumValues() && "Bad value!");
5009
5010  // TODO: Only iterate over uses of a given value of the node
5011  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
5012    if (UI.getUse().getResNo() == Value) {
5013      if (NUses == 0)
5014        return false;
5015      --NUses;
5016    }
5017  }
5018
5019  // Found exactly the right number of uses?
5020  return NUses == 0;
5021}
5022
5023
5024/// hasAnyUseOfValue - Return true if there are any use of the indicated
5025/// value. This method ignores uses of other values defined by this operation.
5026bool SDNode::hasAnyUseOfValue(unsigned Value) const {
5027  assert(Value < getNumValues() && "Bad value!");
5028
5029  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
5030    if (UI.getUse().getResNo() == Value)
5031      return true;
5032
5033  return false;
5034}
5035
5036
5037/// isOnlyUserOf - Return true if this node is the only user of N.
5038///
5039bool SDNode::isOnlyUserOf(SDNode *N) const {
5040  bool Seen = false;
5041  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
5042    SDNode *User = *I;
5043    if (User == this)
5044      Seen = true;
5045    else
5046      return false;
5047  }
5048
5049  return Seen;
5050}
5051
5052/// isOperandOf - Return true if this value is an operand of N.
5053///
5054bool SDValue::isOperandOf(SDNode *N) const {
5055  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5056    if (*this == N->getOperand(i))
5057      return true;
5058  return false;
5059}
5060
5061bool SDNode::isOperandOf(SDNode *N) const {
5062  for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
5063    if (this == N->OperandList[i].getNode())
5064      return true;
5065  return false;
5066}
5067
5068/// reachesChainWithoutSideEffects - Return true if this operand (which must
5069/// be a chain) reaches the specified operand without crossing any
5070/// side-effecting instructions.  In practice, this looks through token
5071/// factors and non-volatile loads.  In order to remain efficient, this only
5072/// looks a couple of nodes in; it does not do an exhaustive search.
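/// A hedged usage sketch (illustrative only; 'LoadChain' and 'Dest' are
/// assumed to be chain-typed SDValues, and 2 is a caller-chosen depth bound):
///   if (LoadChain.reachesChainWithoutSideEffects(Dest, 2)) {
///     // LoadChain reaches Dest through only TokenFactors and non-volatile
///     // loads within the given depth, so it may be safe to bypass it.
///   }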
5073bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
5074                                               unsigned Depth) const {
5075  if (*this == Dest) return true;
5076
5077  // Don't search too deeply; we just want to be able to see through
5078  // TokenFactors and the like.
5079  if (Depth == 0) return false;
5080
5081  // If this is a token factor, all inputs to the TF happen in parallel.  If any
5082  // of the operands of the TF reach dest, then we can do the xform.
5083  if (getOpcode() == ISD::TokenFactor) {
5084    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
5085      if (getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
5086        return true;
5087    return false;
5088  }
5089
5090  // Loads don't have side effects, look through them.
5091  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
5092    if (!Ld->isVolatile())
5093      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
5094  }
5095  return false;
5096}
5097
5098
5099static void findPredecessor(SDNode *N, const SDNode *P, bool &found,
5100                            SmallPtrSet<SDNode *, 32> &Visited) {
5101  if (found || !Visited.insert(N))
5102    return;
5103
5104  for (unsigned i = 0, e = N->getNumOperands(); !found && i != e; ++i) {
5105    SDNode *Op = N->getOperand(i).getNode();
5106    if (Op == P) {
5107      found = true;
5108      return;
5109    }
5110    findPredecessor(Op, P, found, Visited);
5111  }
5112}
5113
5114/// isPredecessorOf - Return true if this node is a predecessor of N. This node
5115/// is either an operand of N or it can be reached by recursively traversing
5116/// up the operands.
5117/// NOTE: this is an expensive method. Use it carefully.
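/// The search is a simple depth-first walk over operands (findPredecessor
/// above) with a Visited set, so in the worst case it touches every node
/// reachable from N's operands.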
5118bool SDNode::isPredecessorOf(SDNode *N) const {
5119  SmallPtrSet<SDNode *, 32> Visited;
5120  bool found = false;
5121  findPredecessor(N, this, found, Visited);
5122  return found;
5123}
5124
5125uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
5126  assert(Num < NumOperands && "Invalid child # of SDNode!");
5127  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
5128}
5129
5130std::string SDNode::getOperationName(const SelectionDAG *G) const {
5131  switch (getOpcode()) {
5132  default:
5133    if (getOpcode() < ISD::BUILTIN_OP_END)
5134      return "<<Unknown DAG Node>>";
5135    if (isMachineOpcode()) {
5136      if (G)
5137        if (const TargetInstrInfo *TII = G->getTarget().getInstrInfo())
5138          if (getMachineOpcode() < TII->getNumOpcodes())
5139            return TII->get(getMachineOpcode()).getName();
5140      return "<<Unknown Machine Node>>";
5141    }
5142    if (G) {
5143      const TargetLowering &TLI = G->getTargetLoweringInfo();
5144      const char *Name = TLI.getTargetNodeName(getOpcode());
5145      if (Name) return Name;
5146      return "<<Unknown Target Node>>";
5147    }
5148    return "<<Unknown Node>>";
5149
5150#ifndef NDEBUG
5151  case ISD::DELETED_NODE:
5152    return "<<Deleted Node!>>";
5153#endif
5154  case ISD::PREFETCH:      return "Prefetch";
5155  case ISD::MEMBARRIER:    return "MemBarrier";
5156  case ISD::ATOMIC_CMP_SWAP:    return "AtomicCmpSwap";
5157  case ISD::ATOMIC_SWAP:        return "AtomicSwap";
5158  case ISD::ATOMIC_LOAD_ADD:    return "AtomicLoadAdd";
5159  case ISD::ATOMIC_LOAD_SUB:    return "AtomicLoadSub";
5160  case ISD::ATOMIC_LOAD_AND:    return "AtomicLoadAnd";
5161  case ISD::ATOMIC_LOAD_OR:     return "AtomicLoadOr";
5162  case ISD::ATOMIC_LOAD_XOR:    return "AtomicLoadXor";
5163  case ISD::ATOMIC_LOAD_NAND:   return "AtomicLoadNand";
5164  case ISD::ATOMIC_LOAD_MIN:    return "AtomicLoadMin";
5165  case ISD::ATOMIC_LOAD_MAX:    return "AtomicLoadMax";
5166  case ISD::ATOMIC_LOAD_UMIN:   return "AtomicLoadUMin";
5167  case ISD::ATOMIC_LOAD_UMAX:   return "AtomicLoadUMax";
5168  case ISD::PCMARKER:      return "PCMarker";
5169  case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
5170  case ISD::SRCVALUE:      return "SrcValue";
5171  case ISD::MEMOPERAND:    return "MemOperand";
5172  case ISD::EntryToken:    return "EntryToken";
5173  case ISD::TokenFactor:   return "TokenFactor";
5174  case ISD::AssertSext:    return "AssertSext";
5175  case ISD::AssertZext:    return "AssertZext";
5176
5177  case ISD::BasicBlock:    return "BasicBlock";
5178  case ISD::ARG_FLAGS:     return "ArgFlags";
5179  case ISD::VALUETYPE:     return "ValueType";
5180  case ISD::Register:      return "Register";
5181
5182  case ISD::Constant:      return "Constant";
5183  case ISD::ConstantFP:    return "ConstantFP";
5184  case ISD::GlobalAddress: return "GlobalAddress";
5185  case ISD::GlobalTLSAddress: return "GlobalTLSAddress";
5186  case ISD::FrameIndex:    return "FrameIndex";
5187  case ISD::JumpTable:     return "JumpTable";
5188  case ISD::GLOBAL_OFFSET_TABLE: return "GLOBAL_OFFSET_TABLE";
5189  case ISD::RETURNADDR: return "RETURNADDR";
5190  case ISD::FRAMEADDR: return "FRAMEADDR";
5191  case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
5192  case ISD::EXCEPTIONADDR: return "EXCEPTIONADDR";
5193  case ISD::EHSELECTION: return "EHSELECTION";
5194  case ISD::EH_RETURN: return "EH_RETURN";
5195  case ISD::ConstantPool:  return "ConstantPool";
5196  case ISD::ExternalSymbol: return "ExternalSymbol";
5197  case ISD::INTRINSIC_WO_CHAIN: {
5198    unsigned IID = cast<ConstantSDNode>(getOperand(0))->getZExtValue();
5199    return Intrinsic::getName((Intrinsic::ID)IID);
5200  }
5201  case ISD::INTRINSIC_VOID:
5202  case ISD::INTRINSIC_W_CHAIN: {
5203    unsigned IID = cast<ConstantSDNode>(getOperand(1))->getZExtValue();
5204    return Intrinsic::getName((Intrinsic::ID)IID);
5205  }
5206
5207  case ISD::BUILD_VECTOR:   return "BUILD_VECTOR";
5208  case ISD::TargetConstant: return "TargetConstant";
5209  case ISD::TargetConstantFP:return "TargetConstantFP";
5210  case ISD::TargetGlobalAddress: return "TargetGlobalAddress";
5211  case ISD::TargetGlobalTLSAddress: return "TargetGlobalTLSAddress";
5212  case ISD::TargetFrameIndex: return "TargetFrameIndex";
5213  case ISD::TargetJumpTable:  return "TargetJumpTable";
5214  case ISD::TargetConstantPool:  return "TargetConstantPool";
5215  case ISD::TargetExternalSymbol: return "TargetExternalSymbol";
5216
5217  case ISD::CopyToReg:     return "CopyToReg";
5218  case ISD::CopyFromReg:   return "CopyFromReg";
5219  case ISD::UNDEF:         return "undef";
5220  case ISD::MERGE_VALUES:  return "merge_values";
5221  case ISD::INLINEASM:     return "inlineasm";
5222  case ISD::DBG_LABEL:     return "dbg_label";
5223  case ISD::EH_LABEL:      return "eh_label";
5224  case ISD::DECLARE:       return "declare";
5225  case ISD::HANDLENODE:    return "handlenode";
5226  case ISD::FORMAL_ARGUMENTS: return "formal_arguments";
5227  case ISD::CALL:          return "call";
5228
5229  // Unary operators
5230  case ISD::FABS:   return "fabs";
5231  case ISD::FNEG:   return "fneg";
5232  case ISD::FSQRT:  return "fsqrt";
5233  case ISD::FSIN:   return "fsin";
5234  case ISD::FCOS:   return "fcos";
5235  case ISD::FPOWI:  return "fpowi";
5236  case ISD::FPOW:   return "fpow";
5237  case ISD::FTRUNC: return "ftrunc";
5238  case ISD::FFLOOR: return "ffloor";
5239  case ISD::FCEIL:  return "fceil";
5240  case ISD::FRINT:  return "frint";
5241  case ISD::FNEARBYINT: return "fnearbyint";
5242
5243  // Binary operators
5244  case ISD::ADD:    return "add";
5245  case ISD::SUB:    return "sub";
5246  case ISD::MUL:    return "mul";
5247  case ISD::MULHU:  return "mulhu";
5248  case ISD::MULHS:  return "mulhs";
5249  case ISD::SDIV:   return "sdiv";
5250  case ISD::UDIV:   return "udiv";
5251  case ISD::SREM:   return "srem";
5252  case ISD::UREM:   return "urem";
5253  case ISD::SMUL_LOHI:  return "smul_lohi";
5254  case ISD::UMUL_LOHI:  return "umul_lohi";
5255  case ISD::SDIVREM:    return "sdivrem";
5256  case ISD::UDIVREM:    return "udivrem";
5257  case ISD::AND:    return "and";
5258  case ISD::OR:     return "or";
5259  case ISD::XOR:    return "xor";
5260  case ISD::SHL:    return "shl";
5261  case ISD::SRA:    return "sra";
5262  case ISD::SRL:    return "srl";
5263  case ISD::ROTL:   return "rotl";
5264  case ISD::ROTR:   return "rotr";
5265  case ISD::FADD:   return "fadd";
5266  case ISD::FSUB:   return "fsub";
5267  case ISD::FMUL:   return "fmul";
5268  case ISD::FDIV:   return "fdiv";
5269  case ISD::FREM:   return "frem";
5270  case ISD::FCOPYSIGN: return "fcopysign";
5271  case ISD::FGETSIGN:  return "fgetsign";
5272
5273  case ISD::SETCC:       return "setcc";
5274  case ISD::VSETCC:      return "vsetcc";
5275  case ISD::SELECT:      return "select";
5276  case ISD::SELECT_CC:   return "select_cc";
5277  case ISD::INSERT_VECTOR_ELT:   return "insert_vector_elt";
5278  case ISD::EXTRACT_VECTOR_ELT:  return "extract_vector_elt";
5279  case ISD::CONCAT_VECTORS:      return "concat_vectors";
5280  case ISD::EXTRACT_SUBVECTOR:   return "extract_subvector";
5281  case ISD::SCALAR_TO_VECTOR:    return "scalar_to_vector";
5282  case ISD::VECTOR_SHUFFLE:      return "vector_shuffle";
5283  case ISD::CARRY_FALSE:         return "carry_false";
5284  case ISD::ADDC:        return "addc";
5285  case ISD::ADDE:        return "adde";
5286  case ISD::SADDO:       return "saddo";
5287  case ISD::UADDO:       return "uaddo";
5288  case ISD::SSUBO:       return "ssubo";
5289  case ISD::USUBO:       return "usubo";
5290  case ISD::SMULO:       return "smulo";
5291  case ISD::UMULO:       return "umulo";
5292  case ISD::SUBC:        return "subc";
5293  case ISD::SUBE:        return "sube";
5294  case ISD::SHL_PARTS:   return "shl_parts";
5295  case ISD::SRA_PARTS:   return "sra_parts";
5296  case ISD::SRL_PARTS:   return "srl_parts";
5297
5298  // Conversion operators.
5299  case ISD::SIGN_EXTEND: return "sign_extend";
5300  case ISD::ZERO_EXTEND: return "zero_extend";
5301  case ISD::ANY_EXTEND:  return "any_extend";
5302  case ISD::SIGN_EXTEND_INREG: return "sign_extend_inreg";
5303  case ISD::TRUNCATE:    return "truncate";
5304  case ISD::FP_ROUND:    return "fp_round";
5305  case ISD::FLT_ROUNDS_: return "flt_rounds";
5306  case ISD::FP_ROUND_INREG: return "fp_round_inreg";
5307  case ISD::FP_EXTEND:   return "fp_extend";
5308
5309  case ISD::SINT_TO_FP:  return "sint_to_fp";
5310  case ISD::UINT_TO_FP:  return "uint_to_fp";
5311  case ISD::FP_TO_SINT:  return "fp_to_sint";
5312  case ISD::FP_TO_UINT:  return "fp_to_uint";
5313  case ISD::BIT_CONVERT: return "bit_convert";
5314
5315  case ISD::CONVERT_RNDSAT: {
5316    switch (cast<CvtRndSatSDNode>(this)->getCvtCode()) {
5317    default: assert(0 && "Unknown cvt code!");
5318    case ISD::CVT_FF:  return "cvt_ff";
5319    case ISD::CVT_FS:  return "cvt_fs";
5320    case ISD::CVT_FU:  return "cvt_fu";
5321    case ISD::CVT_SF:  return "cvt_sf";
5322    case ISD::CVT_UF:  return "cvt_uf";
5323    case ISD::CVT_SS:  return "cvt_ss";
5324    case ISD::CVT_SU:  return "cvt_su";
5325    case ISD::CVT_US:  return "cvt_us";
5326    case ISD::CVT_UU:  return "cvt_uu";
5327    }
5328  }
5329
5330    // Control flow instructions
5331  case ISD::BR:      return "br";
5332  case ISD::BRIND:   return "brind";
5333  case ISD::BR_JT:   return "br_jt";
5334  case ISD::BRCOND:  return "brcond";
5335  case ISD::BR_CC:   return "br_cc";
5336  case ISD::RET:     return "ret";
5337  case ISD::CALLSEQ_START:  return "callseq_start";
5338  case ISD::CALLSEQ_END:    return "callseq_end";
5339
5340    // Other operators
5341  case ISD::LOAD:               return "load";
5342  case ISD::STORE:              return "store";
5343  case ISD::VAARG:              return "vaarg";
5344  case ISD::VACOPY:             return "vacopy";
5345  case ISD::VAEND:              return "vaend";
5346  case ISD::VASTART:            return "vastart";
5347  case ISD::DYNAMIC_STACKALLOC: return "dynamic_stackalloc";
5348  case ISD::EXTRACT_ELEMENT:    return "extract_element";
5349  case ISD::BUILD_PAIR:         return "build_pair";
5350  case ISD::STACKSAVE:          return "stacksave";
5351  case ISD::STACKRESTORE:       return "stackrestore";
5352  case ISD::TRAP:               return "trap";
5353
5354  // Bit manipulation
5355  case ISD::BSWAP:   return "bswap";
5356  case ISD::CTPOP:   return "ctpop";
5357  case ISD::CTTZ:    return "cttz";
5358  case ISD::CTLZ:    return "ctlz";
5359
5360  // Debug info
5361  case ISD::DBG_STOPPOINT: return "dbg_stoppoint";
5362  case ISD::DEBUG_LOC: return "debug_loc";
5363
5364  // Trampolines
5365  case ISD::TRAMPOLINE: return "trampoline";
5366
5367  case ISD::CONDCODE:
5368    switch (cast<CondCodeSDNode>(this)->get()) {
5369    default: assert(0 && "Unknown setcc condition!");
5370    case ISD::SETOEQ:  return "setoeq";
5371    case ISD::SETOGT:  return "setogt";
5372    case ISD::SETOGE:  return "setoge";
5373    case ISD::SETOLT:  return "setolt";
5374    case ISD::SETOLE:  return "setole";
5375    case ISD::SETONE:  return "setone";
5376
5377    case ISD::SETO:    return "seto";
5378    case ISD::SETUO:   return "setuo";
5379    case ISD::SETUEQ:  return "setueq";
5380    case ISD::SETUGT:  return "setugt";
5381    case ISD::SETUGE:  return "setuge";
5382    case ISD::SETULT:  return "setult";
5383    case ISD::SETULE:  return "setule";
5384    case ISD::SETUNE:  return "setune";
5385
5386    case ISD::SETEQ:   return "seteq";
5387    case ISD::SETGT:   return "setgt";
5388    case ISD::SETGE:   return "setge";
5389    case ISD::SETLT:   return "setlt";
5390    case ISD::SETLE:   return "setle";
5391    case ISD::SETNE:   return "setne";
5392    }
5393  }
5394}
5395
5396const char *SDNode::getIndexedModeName(ISD::MemIndexedMode AM) {
5397  switch (AM) {
5398  default:
5399    return "";
5400  case ISD::PRE_INC:
5401    return "<pre-inc>";
5402  case ISD::PRE_DEC:
5403    return "<pre-dec>";
5404  case ISD::POST_INC:
5405    return "<post-inc>";
5406  case ISD::POST_DEC:
5407    return "<post-dec>";
5408  }
5409}
5410
5411std::string ISD::ArgFlagsTy::getArgFlagsString() {
5412  std::string S = "< ";
5413
5414  if (isZExt())
5415    S += "zext ";
5416  if (isSExt())
5417    S += "sext ";
5418  if (isInReg())
5419    S += "inreg ";
5420  if (isSRet())
5421    S += "sret ";
5422  if (isByVal())
5423    S += "byval ";
5424  if (isNest())
5425    S += "nest ";
5426  if (getByValAlign())
5427    S += "byval-align:" + utostr(getByValAlign()) + " ";
5428  if (getOrigAlign())
5429    S += "orig-align:" + utostr(getOrigAlign()) + " ";
5430  if (getByValSize())
5431    S += "byval-size:" + utostr(getByValSize()) + " ";
5432  return S + ">";
5433}
5434
5435void SDNode::dump() const { dump(0); }
5436void SDNode::dump(const SelectionDAG *G) const {
5437  print(errs(), G);
5438}
5439
5440void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
5441  OS << (void*)this << ": ";
5442
5443  for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
5444    if (i) OS << ",";
5445    if (getValueType(i) == MVT::Other)
5446      OS << "ch";
5447    else
5448      OS << getValueType(i).getMVTString();
5449  }
5450  OS << " = " << getOperationName(G);
5451}
5452
5453void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
5454  if (!isTargetOpcode() && getOpcode() == ISD::VECTOR_SHUFFLE) {
5455    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(this);
5456    OS << "<";
5457    for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) {
5458      int Idx = SVN->getMaskElt(i);
5459      if (i) OS << ",";
5460      if (Idx < 0)
5461        OS << "u";
5462      else
5463        OS << Idx;
5464    }
5465    OS << ">";
5466  }
5467
5468  if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
5469    OS << '<' << CSDN->getAPIntValue() << '>';
5470  } else if (const ConstantFPSDNode *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
5471    if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEsingle)
5472      OS << '<' << CSDN->getValueAPF().convertToFloat() << '>';
5473    else if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEdouble)
5474      OS << '<' << CSDN->getValueAPF().convertToDouble() << '>';
5475    else {
5476      OS << "<APFloat(";
5477      CSDN->getValueAPF().bitcastToAPInt().dump();
5478      OS << ")>";
5479    }
5480  } else if (const GlobalAddressSDNode *GADN =
5481             dyn_cast<GlobalAddressSDNode>(this)) {
5482    int64_t offset = GADN->getOffset();
5483    OS << '<';
5484    WriteAsOperand(OS, GADN->getGlobal());
5485    OS << '>';
5486    if (offset > 0)
5487      OS << " + " << offset;
5488    else
5489      OS << " " << offset;
5490  } else if (const FrameIndexSDNode *FIDN = dyn_cast<FrameIndexSDNode>(this)) {
5491    OS << "<" << FIDN->getIndex() << ">";
5492  } else if (const JumpTableSDNode *JTDN = dyn_cast<JumpTableSDNode>(this)) {
5493    OS << "<" << JTDN->getIndex() << ">";
5494  } else if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(this)){
5495    int offset = CP->getOffset();
5496    if (CP->isMachineConstantPoolEntry())
5497      OS << "<" << *CP->getMachineCPVal() << ">";
5498    else
5499      OS << "<" << *CP->getConstVal() << ">";
5500    if (offset > 0)
5501      OS << " + " << offset;
5502    else
5503      OS << " " << offset;
5504  } else if (const BasicBlockSDNode *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
5505    OS << "<";
5506    const Value *LBB = (const Value*)BBDN->getBasicBlock()->getBasicBlock();
5507    if (LBB)
5508      OS << LBB->getName() << " ";
5509    OS << (const void*)BBDN->getBasicBlock() << ">";
5510  } else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
5511    if (G && R->getReg() &&
5512        TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
5513      OS << " " << G->getTarget().getRegisterInfo()->getName(R->getReg());
5514    } else {
5515      OS << " #" << R->getReg();
5516    }
5517  } else if (const ExternalSymbolSDNode *ES =
5518             dyn_cast<ExternalSymbolSDNode>(this)) {
5519    OS << "'" << ES->getSymbol() << "'";
5520  } else if (const SrcValueSDNode *M = dyn_cast<SrcValueSDNode>(this)) {
5521    if (M->getValue())
5522      OS << "<" << M->getValue() << ">";
5523    else
5524      OS << "<null>";
5525  } else if (const MemOperandSDNode *M = dyn_cast<MemOperandSDNode>(this)) {
5526    if (M->MO.getValue())
5527      OS << "<" << M->MO.getValue() << ":" << M->MO.getOffset() << ">";
5528    else
5529      OS << "<null:" << M->MO.getOffset() << ">";
5530  } else if (const ARG_FLAGSSDNode *N = dyn_cast<ARG_FLAGSSDNode>(this)) {
5531    OS << N->getArgFlags().getArgFlagsString();
5532  } else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
5533    OS << ":" << N->getVT().getMVTString();
5534  }
5535  else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
5536    const Value *SrcValue = LD->getSrcValue();
5537    int SrcOffset = LD->getSrcValueOffset();
5538    OS << " <";
5539    if (SrcValue)
5540      OS << SrcValue;
5541    else
5542      OS << "null";
5543    OS << ":" << SrcOffset << ">";
5544
5545    bool doExt = true;
5546    switch (LD->getExtensionType()) {
5547    default: doExt = false; break;
5548    case ISD::EXTLOAD: OS << " <anyext "; break;
5549    case ISD::SEXTLOAD: OS << " <sext "; break;
5550    case ISD::ZEXTLOAD: OS << " <zext "; break;
5551    }
5552    if (doExt)
5553      OS << LD->getMemoryVT().getMVTString() << ">";
5554
5555    const char *AM = getIndexedModeName(LD->getAddressingMode());
5556    if (*AM)
5557      OS << " " << AM;
5558    if (LD->isVolatile())
5559      OS << " <volatile>";
5560    OS << " alignment=" << LD->getAlignment();
5561  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
5562    const Value *SrcValue = ST->getSrcValue();
5563    int SrcOffset = ST->getSrcValueOffset();
5564    OS << " <";
5565    if (SrcValue)
5566      OS << SrcValue;
5567    else
5568      OS << "null";
5569    OS << ":" << SrcOffset << ">";
5570
5571    if (ST->isTruncatingStore())
5572      OS << " <trunc " << ST->getMemoryVT().getMVTString() << ">";
5573
5574    const char *AM = getIndexedModeName(ST->getAddressingMode());
5575    if (*AM)
5576      OS << " " << AM;
5577    if (ST->isVolatile())
5578      OS << " <volatile>";
5579    OS << " alignment=" << ST->getAlignment();
5580  } else if (const AtomicSDNode* AT = dyn_cast<AtomicSDNode>(this)) {
5581    const Value *SrcValue = AT->getSrcValue();
5582    int SrcOffset = AT->getSrcValueOffset();
5583    OS << " <";
5584    if (SrcValue)
5585      OS << SrcValue;
5586    else
5587      OS << "null";
5588    OS << ":" << SrcOffset << ">";
5589    if (AT->isVolatile())
5590      OS << " <volatile>";
5591    OS << " alignment=" << AT->getAlignment();
5592  }
5593}
5594
void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
  print_types(OS, G);
  OS << " ";
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (i) OS << ", ";
    OS << (void*)getOperand(i).getNode();
    if (unsigned RN = getOperand(i).getResNo())
      OS << ":" << RN;
  }
  print_details(OS, G);
}

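/// DumpNodes - Recursively dump N and its singly-used operands as an
/// indented tree.  Operands with multiple uses are printed by address only;
/// they get dumped as separate trees by SelectionDAG::dump().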
static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i).getNode()->hasOneUse())
      DumpNodes(N->getOperand(i).getNode(), indent+2, G);
    else
      cerr << "\n" << std::string(indent+2, ' ')
           << (void*)N->getOperand(i).getNode() << ": <multiple use>";

  cerr << "\n" << std::string(indent, ' ');
  N->dump(G);
}

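/// dump - Dump the whole DAG to stderr.  Every node that is not singly used
/// is printed as its own tree, and the tree rooted at the DAG root is
/// printed last.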
void SelectionDAG::dump() const {
  cerr << "SelectionDAG has " << AllNodes.size() << " nodes:";

  for (allnodes_const_iterator I = allnodes_begin(), E = allnodes_end();
       I != E; ++I) {
    const SDNode *N = I;
    if (!N->hasOneUse() && N != getRoot().getNode())
      DumpNodes(N, 2, this);
  }

  if (getRoot().getNode()) DumpNodes(getRoot().getNode(), 2, this);

  cerr << "\n\n";
}

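/// printr - Print this node's types and details without its operand list;
/// used by the recursive dumper below.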
void SDNode::printr(raw_ostream &OS, const SelectionDAG *G) const {
  print_types(OS, G);
  print_details(OS, G);
}

typedef SmallPtrSet<const SDNode *, 128> VisitedSDNodeSet;
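/// DumpNodesr - Print N and, recursively, its operands to OS, visiting each
/// node at most once.  Operand-less children are printed inline on their
/// parent's line; all other children get their own indented lines.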
static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
                       const SelectionDAG *G, VisitedSDNodeSet &once) {
  if (!once.insert(N))          // If we've been here before, return now.
    return;
  // Dump the current SDNode, but don't end the line yet.
  OS << std::string(indent, ' ');
  N->printr(OS, G);
  // Having printed this SDNode, walk the children:
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDNode *child = N->getOperand(i).getNode();
    if (i) OS << ",";
    OS << " ";
    if (child->getNumOperands() == 0) {
      // This child has no grandchildren; print it inline right here.
      child->printr(OS, G);
      once.insert(child);
    } else {          // Just the address.  FIXME: also print the child's opcode
      OS << (void*)child;
      if (unsigned RN = N->getOperand(i).getResNo())
        OS << ":" << RN;
    }
  }
  OS << "\n";
  // Dump children that have grandchildren on their own line(s).
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDNode *child = N->getOperand(i).getNode();
    DumpNodesr(OS, child, indent+2, G, once);
  }
}

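/// dumpr - Dump this node and its operands, depth-first, to stderr.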
void SDNode::dumpr() const {
  VisitedSDNodeSet once;
  DumpNodesr(errs(), this, 0, 0, once);
}

/// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

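/// getType - Return the type of this constant pool entry, whether it holds a
/// target-specific MachineConstantPoolValue or an ordinary LLVM Constant.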
const Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

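/// isConstantSplat - Determine whether this BUILD_VECTOR is a splat of a
/// constant, allowing undef elements.  On success, SplatValue/SplatUndef
/// hold the replicated bits and their undef mask, SplatBitSize is the
/// smallest element width (no smaller than MinSplatBits) at which the value
/// splats, and HasAnyUndefs reports whether any element was undef.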
bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
                                        APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits) {
  MVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned sz = VT.getSizeInBits();
  if (MinSplatBits > sz)
    return false;

  SplatValue = APInt(sz, 0);
  SplatUndef = APInt(sz, 0);

  // Get the bits.  Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue.  If any of the values are not constant, give up and return
  // false.
  unsigned nOps = getNumOperands();
  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
  for (unsigned i = 0; i < nOps; ++i) {
    SDValue OpVal = getOperand(i);
    unsigned BitPos = i * EltBitSize;

    if (OpVal.getOpcode() == ISD::UNDEF)
      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue |= (APInt(CN->getAPIntValue()).zextOrTrunc(EltBitSize).
                     zextOrTrunc(sz) << BitPos);
    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
    else
      return false;
  }

  // The build_vector is all constants or undefs.  Find the smallest element
  // size that splats the vector.
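  // For example, a <4 x i32> of all 1s splats when viewed as 32-bit elements
  // but not as 16-bit elements, so (with MinSplatBits <= 32) SplatBitSize
  // ends up as 32.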

  HasAnyUndefs = (SplatUndef != 0);
  while (sz > 8) {
    unsigned HalfSize = sz / 2;
    APInt HighValue = APInt(SplatValue).lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = APInt(SplatValue).trunc(HalfSize);
    APInt HighUndef = APInt(SplatUndef).lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = APInt(SplatUndef).trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), or halving would
    // drop below MinSplatBits, stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    sz = HalfSize;
  }

  SplatBitSize = sz;
  return true;
}

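/// isSplatMask - Return true if every non-undef entry of Mask selects the
/// same source element, i.e. the shuffle broadcasts a single element.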
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, MVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}
