SelectionDAG.cpp revision cb2504ff73c3e9e2aaa1008ba3b83a31c51d819d
1//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements the SelectionDAG class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/CodeGen/SelectionDAG.h"
15#include "SDNodeOrdering.h"
16#include "llvm/Constants.h"
17#include "llvm/Analysis/ValueTracking.h"
18#include "llvm/Function.h"
19#include "llvm/GlobalAlias.h"
20#include "llvm/GlobalVariable.h"
21#include "llvm/Intrinsics.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Assembly/Writer.h"
24#include "llvm/CallingConv.h"
25#include "llvm/CodeGen/MachineBasicBlock.h"
26#include "llvm/CodeGen/MachineConstantPool.h"
27#include "llvm/CodeGen/MachineFrameInfo.h"
28#include "llvm/CodeGen/MachineModuleInfo.h"
29#include "llvm/CodeGen/PseudoSourceValue.h"
30#include "llvm/Target/TargetRegisterInfo.h"
31#include "llvm/Target/TargetData.h"
32#include "llvm/Target/TargetFrameInfo.h"
33#include "llvm/Target/TargetLowering.h"
34#include "llvm/Target/TargetOptions.h"
35#include "llvm/Target/TargetInstrInfo.h"
36#include "llvm/Target/TargetIntrinsicInfo.h"
37#include "llvm/Target/TargetMachine.h"
38#include "llvm/Support/CommandLine.h"
39#include "llvm/Support/ErrorHandling.h"
40#include "llvm/Support/ManagedStatic.h"
41#include "llvm/Support/MathExtras.h"
42#include "llvm/Support/raw_ostream.h"
43#include "llvm/System/Mutex.h"
44#include "llvm/ADT/SetVector.h"
45#include "llvm/ADT/SmallPtrSet.h"
46#include "llvm/ADT/SmallSet.h"
47#include "llvm/ADT/SmallVector.h"
48#include "llvm/ADT/StringExtras.h"
49#include <algorithm>
50#include <cmath>
51using namespace llvm;
52
53/// makeVTList - Return an instance of the SDVTList struct initialized with the
54/// specified members.
55static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
56  SDVTList Res = {VTs, NumVTs};
57  return Res;
58}
59
60static const fltSemantics *EVTToAPFloatSemantics(EVT VT) {
61  switch (VT.getSimpleVT().SimpleTy) {
62  default: llvm_unreachable("Unknown FP format");
63  case MVT::f32:     return &APFloat::IEEEsingle;
64  case MVT::f64:     return &APFloat::IEEEdouble;
65  case MVT::f80:     return &APFloat::x87DoubleExtended;
66  case MVT::f128:    return &APFloat::IEEEquad;
67  case MVT::ppcf128: return &APFloat::PPCDoubleDouble;
68  }
69}
70
71SelectionDAG::DAGUpdateListener::~DAGUpdateListener() {}
72
73//===----------------------------------------------------------------------===//
74//                              ConstantFPSDNode Class
75//===----------------------------------------------------------------------===//
76
77/// isExactlyValue - We don't rely on operator== working on double values, as
78/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
79/// As such, this method can be used to do an exact bit-for-bit comparison of
80/// two floating point values.
81bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
82  return getValueAPF().bitwiseIsEqual(V);
83}
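// For example (illustrative): if N is a ConstantFPSDNode holding -0.0, then
//   N->isExactlyValue(APFloat(0.0))
// returns false, even though -0.0 == 0.0 is true for plain doubles, because
// the two values have different bit patterns.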
84
85bool ConstantFPSDNode::isValueValidForType(EVT VT,
86                                           const APFloat& Val) {
87  assert(VT.isFloatingPoint() && "Can only convert between FP types");
88
89  // PPC long double cannot be converted to any other type.
90  if (VT == MVT::ppcf128 ||
91      &Val.getSemantics() == &APFloat::PPCDoubleDouble)
92    return false;
93
94  // convert modifies in place, so make a copy.
95  APFloat Val2 = APFloat(Val);
96  bool losesInfo;
97  (void) Val2.convert(*EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
98                      &losesInfo);
99  return !losesInfo;
100}
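// Illustrative use: the double 0.1 has no exact f32 representation, so the
// trial conversion above reports lost information and the value is rejected,
// while the same value expressed as a float round-trips exactly:
//   ConstantFPSDNode::isValueValidForType(MVT::f32, APFloat(0.1));   // false
//   ConstantFPSDNode::isValueValidForType(MVT::f32, APFloat(0.1f));  // true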
101
102//===----------------------------------------------------------------------===//
103//                              ISD Namespace
104//===----------------------------------------------------------------------===//
105
106/// isBuildVectorAllOnes - Return true if the specified node is a
107/// BUILD_VECTOR where all of the elements are ~0 or undef.
108bool ISD::isBuildVectorAllOnes(const SDNode *N) {
109  // Look through a bit convert.
110  if (N->getOpcode() == ISD::BIT_CONVERT)
111    N = N->getOperand(0).getNode();
112
113  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
114
115  unsigned i = 0, e = N->getNumOperands();
116
117  // Skip over all of the undef values.
118  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
119    ++i;
120
121  // Do not accept an all-undef vector.
122  if (i == e) return false;
123
124  // Do not accept build_vectors that aren't all constants or which have non-~0
125  // elements.
126  SDValue NotZero = N->getOperand(i);
127  if (isa<ConstantSDNode>(NotZero)) {
128    if (!cast<ConstantSDNode>(NotZero)->isAllOnesValue())
129      return false;
130  } else if (isa<ConstantFPSDNode>(NotZero)) {
131    if (!cast<ConstantFPSDNode>(NotZero)->getValueAPF().
132                bitcastToAPInt().isAllOnesValue())
133      return false;
134  } else
135    return false;
136
137  // Okay, we have at least one ~0 value, check to see if the rest match or are
138  // undefs.
139  for (++i; i != e; ++i)
140    if (N->getOperand(i) != NotZero &&
141        N->getOperand(i).getOpcode() != ISD::UNDEF)
142      return false;
143  return true;
144}
145
146
147/// isBuildVectorAllZeros - Return true if the specified node is a
148/// BUILD_VECTOR where all of the elements are 0 or undef.
149bool ISD::isBuildVectorAllZeros(const SDNode *N) {
150  // Look through a bit convert.
151  if (N->getOpcode() == ISD::BIT_CONVERT)
152    N = N->getOperand(0).getNode();
153
154  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
155
156  unsigned i = 0, e = N->getNumOperands();
157
158  // Skip over all of the undef values.
159  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
160    ++i;
161
162  // Do not accept an all-undef vector.
163  if (i == e) return false;
164
165  // Do not accept build_vectors that aren't all constants or which have non-0
166  // elements.
167  SDValue Zero = N->getOperand(i);
168  if (isa<ConstantSDNode>(Zero)) {
169    if (!cast<ConstantSDNode>(Zero)->isNullValue())
170      return false;
171  } else if (isa<ConstantFPSDNode>(Zero)) {
172    if (!cast<ConstantFPSDNode>(Zero)->getValueAPF().isPosZero())
173      return false;
174  } else
175    return false;
176
177  // Okay, we have at least one 0 value, check to see if the rest match or are
178  // undefs.
179  for (++i; i != e; ++i)
180    if (N->getOperand(i) != Zero &&
181        N->getOperand(i).getOpcode() != ISD::UNDEF)
182      return false;
183  return true;
184}
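// Illustrative results of the two predicates above for v4i32 BUILD_VECTORs:
//   (build_vector undef, 0, 0, 0)              -> isBuildVectorAllZeros: true
//   (build_vector undef, undef, undef, undef)  -> false (all-undef rejected)
//   (build_vector -1, -1, undef, -1)           -> isBuildVectorAllOnes:  true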
185
186/// isScalarToVector - Return true if the specified node is a
187/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
188/// element is not an undef.
189bool ISD::isScalarToVector(const SDNode *N) {
190  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
191    return true;
192
193  if (N->getOpcode() != ISD::BUILD_VECTOR)
194    return false;
195  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
196    return false;
197  unsigned NumElems = N->getNumOperands();
198  for (unsigned i = 1; i < NumElems; ++i) {
199    SDValue V = N->getOperand(i);
200    if (V.getOpcode() != ISD::UNDEF)
201      return false;
202  }
203  return true;
204}
205
206/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
207/// when given the operation for (X op Y).
208ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
209  // To perform this operation, we just need to swap the L and G bits of the
210  // operation.
211  unsigned OldL = (Operation >> 2) & 1;
212  unsigned OldG = (Operation >> 1) & 1;
213  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
214                       (OldL << 1) |       // New G bit
215                       (OldG << 2));       // New L bit.
216}
217
218/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
219/// 'op' is a valid SetCC operation.
220ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
221  unsigned Operation = Op;
222  if (isInteger)
223    Operation ^= 7;   // Flip L, G, E bits, but not U.
224  else
225    Operation ^= 15;  // Flip all of the condition bits.
226
227  if (Operation > ISD::SETTRUE2)
228    Operation &= ~8;  // Don't let N and U bits get set.
229
230  return ISD::CondCode(Operation);
231}
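// Worked examples of the condition-code algebra above; these follow directly
// from the L/G/E/U bit encoding of ISD::CondCode:
//   getSetCCSwappedOperands(ISD::SETLT) == ISD::SETGT   // X < Y  <=>  Y > X
//   getSetCCInverse(ISD::SETLT,  true)  == ISD::SETGE   // integer !(X < Y)
//   getSetCCInverse(ISD::SETULT, false) == ISD::SETOGE  // FP: !(unord or <)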
232
233
234/// isSignedOp - For an integer comparison, return 1 if the comparison is a
235/// signed operation and 2 if it is an unsigned comparison.  Return zero
236/// if the operation does not depend on the sign of the input (setne and seteq).
237static int isSignedOp(ISD::CondCode Opcode) {
238  switch (Opcode) {
239  default: llvm_unreachable("Illegal integer setcc operation!");
240  case ISD::SETEQ:
241  case ISD::SETNE: return 0;
242  case ISD::SETLT:
243  case ISD::SETLE:
244  case ISD::SETGT:
245  case ISD::SETGE: return 1;
246  case ISD::SETULT:
247  case ISD::SETULE:
248  case ISD::SETUGT:
249  case ISD::SETUGE: return 2;
250  }
251}
252
253/// getSetCCOrOperation - Return the result of a logical OR between different
254/// comparisons of identical values: ((X op1 Y) | (X op2 Y)).  This function
255/// returns SETCC_INVALID if it is not possible to represent the resultant
256/// comparison.
257ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
258                                       bool isInteger) {
259  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
260    // Cannot fold a signed integer setcc with an unsigned integer setcc.
261    return ISD::SETCC_INVALID;
262
263  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.
264
265  // If the N and U bits get set then the resultant comparison DOES suddenly
266  // care about orderedness, and is true when ordered.
267  if (Op > ISD::SETTRUE2)
268    Op &= ~16;     // Clear the U bit if the N bit is set.
269
270  // Canonicalize illegal integer setcc's.
271  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
272    Op = ISD::SETNE;
273
274  return ISD::CondCode(Op);
275}
276
277/// getSetCCAndOperation - Return the result of a logical AND between different
278/// comparisons of identical values: ((X op1 Y) & (X op2 Y)).  This
279/// function returns SETCC_INVALID if it is not possible to represent the
280/// resultant comparison.
281ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
282                                        bool isInteger) {
283  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
284    // Cannot fold a signed setcc with an unsigned setcc.
285    return ISD::SETCC_INVALID;
286
287  // Combine all of the condition bits.
288  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
289
290  // Canonicalize illegal integer setcc's.
291  if (isInteger) {
292    switch (Result) {
293    default: break;
294    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
295    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
296    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
297    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
298    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
299    }
300  }
301
302  return Result;
303}
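// Worked examples for the two combining helpers above:
//   getSetCCOrOperation (ISD::SETLT, ISD::SETGT, true) == ISD::SETNE
//   getSetCCAndOperation(ISD::SETLE, ISD::SETGE, true) == ISD::SETEQ
// Mixing a signed with an unsigned integer predicate (e.g. SETLT and SETULT)
// returns ISD::SETCC_INVALID from either helper.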
304
305const TargetMachine &SelectionDAG::getTarget() const {
306  return MF->getTarget();
307}
308
309//===----------------------------------------------------------------------===//
310//                           SDNode Profile Support
311//===----------------------------------------------------------------------===//
312
313/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
314///
315static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
316  ID.AddInteger(OpC);
317}
318
319/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
320/// solely with their pointer.
321static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
322  ID.AddPointer(VTList.VTs);
323}
324
325/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
326///
327static void AddNodeIDOperands(FoldingSetNodeID &ID,
328                              const SDValue *Ops, unsigned NumOps) {
329  for (; NumOps; --NumOps, ++Ops) {
330    ID.AddPointer(Ops->getNode());
331    ID.AddInteger(Ops->getResNo());
332  }
333}
334
335/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
336///
337static void AddNodeIDOperands(FoldingSetNodeID &ID,
338                              const SDUse *Ops, unsigned NumOps) {
339  for (; NumOps; --NumOps, ++Ops) {
340    ID.AddPointer(Ops->getNode());
341    ID.AddInteger(Ops->getResNo());
342  }
343}
344
345static void AddNodeIDNode(FoldingSetNodeID &ID,
346                          unsigned short OpC, SDVTList VTList,
347                          const SDValue *OpList, unsigned N) {
348  AddNodeIDOpcode(ID, OpC);
349  AddNodeIDValueTypes(ID, VTList);
350  AddNodeIDOperands(ID, OpList, N);
351}
352
353/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
354/// the NodeID data.
355static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
356  switch (N->getOpcode()) {
357  case ISD::TargetExternalSymbol:
358  case ISD::ExternalSymbol:
359    llvm_unreachable("Should only be used on nodes with operands");
360  default: break;  // Normal nodes don't need extra info.
361  case ISD::TargetConstant:
362  case ISD::Constant:
363    ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
364    break;
365  case ISD::TargetConstantFP:
366  case ISD::ConstantFP: {
367    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
368    break;
369  }
370  case ISD::TargetGlobalAddress:
371  case ISD::GlobalAddress:
372  case ISD::TargetGlobalTLSAddress:
373  case ISD::GlobalTLSAddress: {
374    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
375    ID.AddPointer(GA->getGlobal());
376    ID.AddInteger(GA->getOffset());
377    ID.AddInteger(GA->getTargetFlags());
378    break;
379  }
380  case ISD::BasicBlock:
381    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
382    break;
383  case ISD::Register:
384    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
385    break;
386
387  case ISD::SRCVALUE:
388    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
389    break;
390  case ISD::FrameIndex:
391  case ISD::TargetFrameIndex:
392    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
393    break;
394  case ISD::JumpTable:
395  case ISD::TargetJumpTable:
396    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
397    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
398    break;
399  case ISD::ConstantPool:
400  case ISD::TargetConstantPool: {
401    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
402    ID.AddInteger(CP->getAlignment());
403    ID.AddInteger(CP->getOffset());
404    if (CP->isMachineConstantPoolEntry())
405      CP->getMachineCPVal()->AddSelectionDAGCSEId(ID);
406    else
407      ID.AddPointer(CP->getConstVal());
408    ID.AddInteger(CP->getTargetFlags());
409    break;
410  }
411  case ISD::LOAD: {
412    const LoadSDNode *LD = cast<LoadSDNode>(N);
413    ID.AddInteger(LD->getMemoryVT().getRawBits());
414    ID.AddInteger(LD->getRawSubclassData());
415    break;
416  }
417  case ISD::STORE: {
418    const StoreSDNode *ST = cast<StoreSDNode>(N);
419    ID.AddInteger(ST->getMemoryVT().getRawBits());
420    ID.AddInteger(ST->getRawSubclassData());
421    break;
422  }
423  case ISD::ATOMIC_CMP_SWAP:
424  case ISD::ATOMIC_SWAP:
425  case ISD::ATOMIC_LOAD_ADD:
426  case ISD::ATOMIC_LOAD_SUB:
427  case ISD::ATOMIC_LOAD_AND:
428  case ISD::ATOMIC_LOAD_OR:
429  case ISD::ATOMIC_LOAD_XOR:
430  case ISD::ATOMIC_LOAD_NAND:
431  case ISD::ATOMIC_LOAD_MIN:
432  case ISD::ATOMIC_LOAD_MAX:
433  case ISD::ATOMIC_LOAD_UMIN:
434  case ISD::ATOMIC_LOAD_UMAX: {
435    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
436    ID.AddInteger(AT->getMemoryVT().getRawBits());
437    ID.AddInteger(AT->getRawSubclassData());
438    break;
439  }
440  case ISD::VECTOR_SHUFFLE: {
441    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
442    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
443         i != e; ++i)
444      ID.AddInteger(SVN->getMaskElt(i));
445    break;
446  }
447  case ISD::TargetBlockAddress:
448  case ISD::BlockAddress: {
449    ID.AddPointer(cast<BlockAddressSDNode>(N)->getBlockAddress());
450    ID.AddInteger(cast<BlockAddressSDNode>(N)->getTargetFlags());
451    break;
452  }
453  } // end switch (N->getOpcode())
454}
455
456/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
457/// data.
458static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
459  AddNodeIDOpcode(ID, N->getOpcode());
460  // Add the return value info.
461  AddNodeIDValueTypes(ID, N->getVTList());
462  // Add the operand info.
463  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());
464
465  // Handle SDNode leaves with special info.
466  AddNodeIDCustom(ID, N);
467}
468
469/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
470/// the CSE map that carries volatility, indexing mode, and
471/// extension/truncation information.
472///
473static inline unsigned
474encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile) {
475  assert((ConvType & 3) == ConvType &&
476         "ConvType may not require more than 2 bits!");
477  assert((AM & 7) == AM &&
478         "AM may not require more than 3 bits!");
479  return ConvType |
480         (AM << 2) |
481         (isVolatile << 5);
482}
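// The packed layout produced here is:
//   bits 0-1: extension/truncation kind (ConvType)
//   bits 2-4: ISD::MemIndexedMode (AM)
//   bit    5: volatile flag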
483
484//===----------------------------------------------------------------------===//
485//                              SelectionDAG Class
486//===----------------------------------------------------------------------===//
487
488/// doNotCSE - Return true if CSE should not be performed for this node.
489static bool doNotCSE(SDNode *N) {
490  if (N->getValueType(0) == MVT::Flag)
491    return true; // Never CSE anything that produces a flag.
492
493  switch (N->getOpcode()) {
494  default: break;
495  case ISD::HANDLENODE:
496  case ISD::EH_LABEL:
497    return true;   // Never CSE these nodes.
498  }
499
500  // Check that remaining values produced are not flags.
501  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
502    if (N->getValueType(i) == MVT::Flag)
503      return true; // Never CSE anything that produces a flag.
504
505  return false;
506}
507
508/// RemoveDeadNodes - This method deletes all unreachable nodes in the
509/// SelectionDAG.
510void SelectionDAG::RemoveDeadNodes() {
511  // Create a dummy node (which is not added to allnodes) that adds a reference
512  // to the root node, preventing it from being deleted.
513  HandleSDNode Dummy(getRoot());
514
515  SmallVector<SDNode*, 128> DeadNodes;
516
517  // Add all obviously-dead nodes to the DeadNodes worklist.
518  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
519    if (I->use_empty())
520      DeadNodes.push_back(I);
521
522  RemoveDeadNodes(DeadNodes);
523
524  // If the root changed (e.g. it was a dead load), update the root.
525  setRoot(Dummy.getValue());
526}
527
528/// RemoveDeadNodes - This method deletes the unreachable nodes in the
529/// given list, and any nodes that become unreachable as a result.
530void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes,
531                                   DAGUpdateListener *UpdateListener) {
532
533  // Process the worklist, deleting the nodes and adding their uses to the
534  // worklist.
535  while (!DeadNodes.empty()) {
536    SDNode *N = DeadNodes.pop_back_val();
537
538    if (UpdateListener)
539      UpdateListener->NodeDeleted(N, 0);
540
541    // Take the node out of the appropriate CSE map.
542    RemoveNodeFromCSEMaps(N);
543
544    // Next, brutally remove the operand list.  This is safe to do, as there are
545    // no cycles in the graph.
546    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
547      SDUse &Use = *I++;
548      SDNode *Operand = Use.getNode();
549      Use.set(SDValue());
550
551      // Now that we removed this operand, see if there are no uses of it left.
552      if (Operand->use_empty())
553        DeadNodes.push_back(Operand);
554    }
555
556    DeallocateNode(N);
557  }
558}
559
560void SelectionDAG::RemoveDeadNode(SDNode *N, DAGUpdateListener *UpdateListener){
561  SmallVector<SDNode*, 16> DeadNodes(1, N);
562  RemoveDeadNodes(DeadNodes, UpdateListener);
563}
564
565void SelectionDAG::DeleteNode(SDNode *N) {
566  // First take this out of the appropriate CSE map.
567  RemoveNodeFromCSEMaps(N);
568
569  // Finally, remove uses due to operands of this node, remove from the
570  // AllNodes list, and delete the node.
571  DeleteNodeNotInCSEMaps(N);
572}
573
574void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
575  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
576  assert(N->use_empty() && "Cannot delete a node that is not dead!");
577
578  // Drop all of the operands and decrement used node's use counts.
579  N->DropOperands();
580
581  DeallocateNode(N);
582}
583
584void SelectionDAG::DeallocateNode(SDNode *N) {
585  if (N->OperandsNeedDelete)
586    delete[] N->OperandList;
587
588  // Set the opcode to DELETED_NODE to help catch bugs when node
589  // memory is reallocated.
590  N->NodeType = ISD::DELETED_NODE;
591
592  NodeAllocator.Deallocate(AllNodes.remove(N));
593
594  // Remove the ordering of this node.
595  if (Ordering) Ordering->remove(N);
596}
597
598/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
599/// corresponds to it.  This is useful when we're about to delete or repurpose
600/// the node.  We don't want future requests for structurally identical nodes
601/// to return N anymore.
602bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
603  bool Erased = false;
604  switch (N->getOpcode()) {
605  case ISD::EntryToken:
606    llvm_unreachable("EntryToken should not be in CSEMaps!");
607    return false;
608  case ISD::HANDLENODE: return false;  // noop.
609  case ISD::CONDCODE:
610    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
611           "Cond code doesn't exist!");
612    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
613    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
614    break;
615  case ISD::ExternalSymbol:
616    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
617    break;
618  case ISD::TargetExternalSymbol: {
619    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
620    Erased = TargetExternalSymbols.erase(
621               std::pair<std::string,unsigned char>(ESN->getSymbol(),
622                                                    ESN->getTargetFlags()));
623    break;
624  }
625  case ISD::VALUETYPE: {
626    EVT VT = cast<VTSDNode>(N)->getVT();
627    if (VT.isExtended()) {
628      Erased = ExtendedValueTypeNodes.erase(VT);
629    } else {
630      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
631      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
632    }
633    break;
634  }
635  default:
636    // Remove it from the CSE Map.
637    Erased = CSEMap.RemoveNode(N);
638    break;
639  }
640#ifndef NDEBUG
641  // Verify that the node was actually in one of the CSE maps, unless it has a
642  // flag result (which cannot be CSE'd) or is one of the special cases that are
643  // not subject to CSE.
644  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Flag &&
645      !N->isMachineOpcode() && !doNotCSE(N)) {
646    N->dump(this);
647    errs() << "\n";
648    llvm_unreachable("Node is not in map!");
649  }
650#endif
651  return Erased;
652}
653
654/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
655/// maps and modified in place. Add it back to the CSE maps, unless an identical
656/// node already exists, in which case transfer all its users to the existing
657/// node. This transfer can potentially trigger recursive merging.
658///
659void
660SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N,
661                                       DAGUpdateListener *UpdateListener) {
662  // For node types that aren't CSE'd, just act as if no identical node
663  // already exists.
664  if (!doNotCSE(N)) {
665    SDNode *Existing = CSEMap.GetOrInsertNode(N);
666    if (Existing != N) {
667      // If there was already an existing matching node, use ReplaceAllUsesWith
668      // to replace the dead one with the existing one.  This can cause
669      // recursive merging of other unrelated nodes down the line.
670      ReplaceAllUsesWith(N, Existing, UpdateListener);
671
672      // N is now dead.  Inform the listener if it exists and delete it.
673      if (UpdateListener)
674        UpdateListener->NodeDeleted(N, Existing);
675      DeleteNodeNotInCSEMaps(N);
676      return;
677    }
678  }
679
680  // If the node doesn't already exist, we updated it.  Inform a listener if
681  // it exists.
682  if (UpdateListener)
683    UpdateListener->NodeUpdated(N);
684}
685
686/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
687/// were replaced with those specified.  If this node is never memoized,
688/// return null, otherwise return a pointer to the slot it would take.  If a
689/// node already exists with these operands, the slot will be non-null.
690SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
691                                           void *&InsertPos) {
692  if (doNotCSE(N))
693    return 0;
694
695  SDValue Ops[] = { Op };
696  FoldingSetNodeID ID;
697  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
698  AddNodeIDCustom(ID, N);
699  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
700  return Node;
701}
702
703/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
704/// were replaced with those specified.  If this node is never memoized,
705/// return null, otherwise return a pointer to the slot it would take.  If a
706/// node already exists with these operands, the slot will be non-null.
707SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
708                                           SDValue Op1, SDValue Op2,
709                                           void *&InsertPos) {
710  if (doNotCSE(N))
711    return 0;
712
713  SDValue Ops[] = { Op1, Op2 };
714  FoldingSetNodeID ID;
715  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
716  AddNodeIDCustom(ID, N);
717  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
718  return Node;
719}
720
721
722/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
723/// were replaced with those specified.  If this node is never memoized,
724/// return null, otherwise return a pointer to the slot it would take.  If a
725/// node already exists with these operands, the slot will be non-null.
726SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
727                                           const SDValue *Ops,unsigned NumOps,
728                                           void *&InsertPos) {
729  if (doNotCSE(N))
730    return 0;
731
732  FoldingSetNodeID ID;
733  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
734  AddNodeIDCustom(ID, N);
735  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
736  return Node;
737}
738
739/// VerifyNode - Sanity check the given node.  Aborts if it is invalid.
740void SelectionDAG::VerifyNode(SDNode *N) {
741  switch (N->getOpcode()) {
742  default:
743    break;
744  case ISD::BUILD_PAIR: {
745    EVT VT = N->getValueType(0);
746    assert(N->getNumValues() == 1 && "Too many results!");
747    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
748           "Wrong return type!");
749    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
750    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
751           "Mismatched operand types!");
752    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
753           "Wrong operand type!");
754    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
755           "Wrong return type size");
756    break;
757  }
758  case ISD::BUILD_VECTOR: {
759    assert(N->getNumValues() == 1 && "Too many results!");
760    assert(N->getValueType(0).isVector() && "Wrong return type!");
761    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
762           "Wrong number of operands!");
763    EVT EltVT = N->getValueType(0).getVectorElementType();
764    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I)
765      assert((I->getValueType() == EltVT ||
766             (EltVT.isInteger() && I->getValueType().isInteger() &&
767              EltVT.bitsLE(I->getValueType()))) &&
768            "Wrong operand type!");
769    break;
770  }
771  }
772}
773
774/// getEVTAlignment - Compute the default alignment value for the
775/// given type.
776///
777unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
778  const Type *Ty = VT == MVT::iPTR ?
779                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
780                   VT.getTypeForEVT(*getContext());
781
782  return TLI.getTargetData()->getABITypeAlignment(Ty);
783}
784
785// EntryNode could meaningfully have debug info if we can find it...
786SelectionDAG::SelectionDAG(TargetLowering &tli, FunctionLoweringInfo &fli)
787  : TLI(tli), FLI(fli), DW(0),
788    EntryNode(ISD::EntryToken, DebugLoc::getUnknownLoc(),
789              getVTList(MVT::Other)),
790    Root(getEntryNode()), Ordering(0) {
791  AllNodes.push_back(&EntryNode);
792  if (DisableScheduling)
793    Ordering = new SDNodeOrdering();
794}
795
796void SelectionDAG::init(MachineFunction &mf, MachineModuleInfo *mmi,
797                        DwarfWriter *dw) {
798  MF = &mf;
799  MMI = mmi;
800  DW = dw;
801  Context = &mf.getFunction()->getContext();
802}
803
804SelectionDAG::~SelectionDAG() {
805  allnodes_clear();
806  delete Ordering;
807}
808
809void SelectionDAG::allnodes_clear() {
810  assert(&*AllNodes.begin() == &EntryNode);
811  AllNodes.remove(AllNodes.begin());
812  while (!AllNodes.empty())
813    DeallocateNode(AllNodes.begin());
814}
815
816void SelectionDAG::clear() {
817  allnodes_clear();
818  OperandAllocator.Reset();
819  CSEMap.clear();
820
821  ExtendedValueTypeNodes.clear();
822  ExternalSymbols.clear();
823  TargetExternalSymbols.clear();
824  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
825            static_cast<CondCodeSDNode*>(0));
826  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
827            static_cast<SDNode*>(0));
828
829  EntryNode.UseList = 0;
830  AllNodes.push_back(&EntryNode);
831  Root = getEntryNode();
832  if (DisableScheduling)
833    Ordering = new SDNodeOrdering();
834}
835
836SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
837  return VT.bitsGT(Op.getValueType()) ?
838    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
839    getNode(ISD::TRUNCATE, DL, VT, Op);
840}
841
842SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
843  return VT.bitsGT(Op.getValueType()) ?
844    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
845    getNode(ISD::TRUNCATE, DL, VT, Op);
846}
847
848SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, DebugLoc DL, EVT VT) {
849  assert(!VT.isVector() &&
850         "getZeroExtendInReg should use the vector element type instead of "
851         "the vector type!");
852  if (Op.getValueType() == VT) return Op;
853  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
854  APInt Imm = APInt::getLowBitsSet(BitWidth,
855                                   VT.getSizeInBits());
856  return getNode(ISD::AND, DL, Op.getValueType(), Op,
857                 getConstant(Imm, Op.getValueType()));
858}
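// Illustrative use: zero-extending the low 8 bits of an i32 value in place is
// just a mask with 0xFF:
//   SDValue V = getZeroExtendInReg(Op /* i32 */, dl, MVT::i8);
//   // V == (and Op, (i32 0xFF))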
859
860/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
861///
862SDValue SelectionDAG::getNOT(DebugLoc DL, SDValue Val, EVT VT) {
863  EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
864  SDValue NegOne =
865    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
866  return getNode(ISD::XOR, DL, VT, Val, NegOne);
867}
868
869SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
870  EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
871  assert((EltVT.getSizeInBits() >= 64 ||
872         (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
873         "getConstant with a uint64_t value that doesn't fit in the type!");
874  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
875}
876
877SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
878  return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
879}
880
881SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
882  assert(VT.isInteger() && "Cannot create FP integer constant!");
883
884  EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
885  assert(Val.getBitWidth() == EltVT.getSizeInBits() &&
886         "APInt size does not match type size!");
887
888  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
889  FoldingSetNodeID ID;
890  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
891  ID.AddPointer(&Val);
892  void *IP = 0;
893  SDNode *N = NULL;
894  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
895    if (!VT.isVector())
896      return SDValue(N, 0);
897
898  if (!N) {
899    N = NodeAllocator.Allocate<ConstantSDNode>();
900    new (N) ConstantSDNode(isT, &Val, EltVT);
901    CSEMap.InsertNode(N, IP);
902    AllNodes.push_back(N);
903  }
904
905  SDValue Result(N, 0);
906  if (VT.isVector()) {
907    SmallVector<SDValue, 8> Ops;
908    Ops.assign(VT.getVectorNumElements(), Result);
909    Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
910                     VT, &Ops[0], Ops.size());
911  }
912  return Result;
913}
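// Note that a vector VT yields a splat.  For example (illustrative),
//   getConstant(1, MVT::v4i32)
// produces a BUILD_VECTOR whose four operands are all the same i32 constant 1
// node, rather than a single ConstantSDNode of vector type.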
914
915SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
916  return getConstant(Val, TLI.getPointerTy(), isTarget);
917}
918
919
920SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
921  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
922}
923
924SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
925  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
926
927  EVT EltVT =
928    VT.isVector() ? VT.getVectorElementType() : VT;
929
930  // Do the map lookup using the actual bit pattern for the floating point
931  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
932  // we don't have issues with SNANs.
933  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
934  FoldingSetNodeID ID;
935  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
936  ID.AddPointer(&V);
937  void *IP = 0;
938  SDNode *N = NULL;
939  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
940    if (!VT.isVector())
941      return SDValue(N, 0);
942
943  if (!N) {
944    N = NodeAllocator.Allocate<ConstantFPSDNode>();
945    new (N) ConstantFPSDNode(isTarget, &V, EltVT);
946    CSEMap.InsertNode(N, IP);
947    AllNodes.push_back(N);
948  }
949
950  SDValue Result(N, 0);
951  if (VT.isVector()) {
952    SmallVector<SDValue, 8> Ops;
953    Ops.assign(VT.getVectorNumElements(), Result);
954    // FIXME DebugLoc info might be appropriate here
955    Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
956                     VT, &Ops[0], Ops.size());
957  }
958  return Result;
959}
960
961SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
962  EVT EltVT =
963    VT.isVector() ? VT.getVectorElementType() : VT;
964  if (EltVT==MVT::f32)
965    return getConstantFP(APFloat((float)Val), VT, isTarget);
966  else
967    return getConstantFP(APFloat(Val), VT, isTarget);
968}
969
970SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV,
971                                       EVT VT, int64_t Offset,
972                                       bool isTargetGA,
973                                       unsigned char TargetFlags) {
974  assert((TargetFlags == 0 || isTargetGA) &&
975         "Cannot set target flags on target-independent globals");
976
977  // Truncate (with sign-extension) the offset value to the pointer size.
978  EVT PTy = TLI.getPointerTy();
979  unsigned BitWidth = PTy.getSizeInBits();
980  if (BitWidth < 64)
981    Offset = (Offset << (64 - BitWidth) >> (64 - BitWidth));
982
983  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
984  if (!GVar) {
985    // If GV is an alias then use the aliasee for determining thread-localness.
986    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
987      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
988  }
989
990  unsigned Opc;
991  if (GVar && GVar->isThreadLocal())
992    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
993  else
994    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
995
996  FoldingSetNodeID ID;
997  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
998  ID.AddPointer(GV);
999  ID.AddInteger(Offset);
1000  ID.AddInteger(TargetFlags);
1001  void *IP = 0;
1002  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1003    return SDValue(E, 0);
1004
1005  SDNode *N = NodeAllocator.Allocate<GlobalAddressSDNode>();
1006  new (N) GlobalAddressSDNode(Opc, GV, VT, Offset, TargetFlags);
1007  CSEMap.InsertNode(N, IP);
1008  AllNodes.push_back(N);
1009  return SDValue(N, 0);
1010}
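// The shift pair in getGlobalAddress sign-extends the low pointer-width bits
// of the offset.  For example, with 32-bit pointers (illustrative values):
//   int64_t Offset = 0xFFFFFFFFLL;   // only bits 0-31 are meaningful
//   Offset = (Offset << 32) >> 32;   // Offset is now -1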
1011
1012SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1013  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1014  FoldingSetNodeID ID;
1015  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
1016  ID.AddInteger(FI);
1017  void *IP = 0;
1018  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1019    return SDValue(E, 0);
1020
1021  SDNode *N = NodeAllocator.Allocate<FrameIndexSDNode>();
1022  new (N) FrameIndexSDNode(FI, VT, isTarget);
1023  CSEMap.InsertNode(N, IP);
1024  AllNodes.push_back(N);
1025  return SDValue(N, 0);
1026}
1027
1028SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1029                                   unsigned char TargetFlags) {
1030  assert((TargetFlags == 0 || isTarget) &&
1031         "Cannot set target flags on target-independent jump tables");
1032  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1033  FoldingSetNodeID ID;
1034  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
1035  ID.AddInteger(JTI);
1036  ID.AddInteger(TargetFlags);
1037  void *IP = 0;
1038  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1039    return SDValue(E, 0);
1040
1041  SDNode *N = NodeAllocator.Allocate<JumpTableSDNode>();
1042  new (N) JumpTableSDNode(JTI, VT, isTarget, TargetFlags);
1043  CSEMap.InsertNode(N, IP);
1044  AllNodes.push_back(N);
1045  return SDValue(N, 0);
1046}
1047
1048SDValue SelectionDAG::getConstantPool(Constant *C, EVT VT,
1049                                      unsigned Alignment, int Offset,
1050                                      bool isTarget,
1051                                      unsigned char TargetFlags) {
1052  assert((TargetFlags == 0 || isTarget) &&
1053         "Cannot set target flags on target-independent globals");
1054  if (Alignment == 0)
1055    Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
1056  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1057  FoldingSetNodeID ID;
1058  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
1059  ID.AddInteger(Alignment);
1060  ID.AddInteger(Offset);
1061  ID.AddPointer(C);
1062  ID.AddInteger(TargetFlags);
1063  void *IP = 0;
1064  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1065    return SDValue(E, 0);
1066
1067  SDNode *N = NodeAllocator.Allocate<ConstantPoolSDNode>();
1068  new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment, TargetFlags);
1069  CSEMap.InsertNode(N, IP);
1070  AllNodes.push_back(N);
1071  return SDValue(N, 0);
1072}
1073
1074
1075SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1076                                      unsigned Alignment, int Offset,
1077                                      bool isTarget,
1078                                      unsigned char TargetFlags) {
1079  assert((TargetFlags == 0 || isTarget) &&
1080         "Cannot set target flags on target-independent globals");
1081  if (Alignment == 0)
1082    Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
1083  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1084  FoldingSetNodeID ID;
1085  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
1086  ID.AddInteger(Alignment);
1087  ID.AddInteger(Offset);
1088  C->AddSelectionDAGCSEId(ID);
1089  ID.AddInteger(TargetFlags);
1090  void *IP = 0;
1091  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1092    return SDValue(E, 0);
1093
1094  SDNode *N = NodeAllocator.Allocate<ConstantPoolSDNode>();
1095  new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment, TargetFlags);
1096  CSEMap.InsertNode(N, IP);
1097  AllNodes.push_back(N);
1098  return SDValue(N, 0);
1099}
1100
1101SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1102  FoldingSetNodeID ID;
1103  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
1104  ID.AddPointer(MBB);
1105  void *IP = 0;
1106  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1107    return SDValue(E, 0);
1108
1109  SDNode *N = NodeAllocator.Allocate<BasicBlockSDNode>();
1110  new (N) BasicBlockSDNode(MBB);
1111  CSEMap.InsertNode(N, IP);
1112  AllNodes.push_back(N);
1113  return SDValue(N, 0);
1114}
1115
1116SDValue SelectionDAG::getValueType(EVT VT) {
1117  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1118      ValueTypeNodes.size())
1119    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1120
1121  SDNode *&N = VT.isExtended() ?
1122    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1123
1124  if (N) return SDValue(N, 0);
1125  N = NodeAllocator.Allocate<VTSDNode>();
1126  new (N) VTSDNode(VT);
1127  AllNodes.push_back(N);
1128  return SDValue(N, 0);
1129}
1130
1131SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1132  SDNode *&N = ExternalSymbols[Sym];
1133  if (N) return SDValue(N, 0);
1134  N = NodeAllocator.Allocate<ExternalSymbolSDNode>();
1135  new (N) ExternalSymbolSDNode(false, Sym, 0, VT);
1136  AllNodes.push_back(N);
1137  return SDValue(N, 0);
1138}
1139
1140SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1141                                              unsigned char TargetFlags) {
1142  SDNode *&N =
1143    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
1144                                                               TargetFlags)];
1145  if (N) return SDValue(N, 0);
1146  N = NodeAllocator.Allocate<ExternalSymbolSDNode>();
1147  new (N) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
1148  AllNodes.push_back(N);
1149  return SDValue(N, 0);
1150}
1151
1152SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1153  if ((unsigned)Cond >= CondCodeNodes.size())
1154    CondCodeNodes.resize(Cond+1);
1155
1156  if (CondCodeNodes[Cond] == 0) {
1157    CondCodeSDNode *N = NodeAllocator.Allocate<CondCodeSDNode>();
1158    new (N) CondCodeSDNode(Cond);
1159    CondCodeNodes[Cond] = N;
1160    AllNodes.push_back(N);
1161  }
1162
1163  return SDValue(CondCodeNodes[Cond], 0);
1164}
1165
1166// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
1167// the shuffle mask M that point at N1 to point at N2, and indices that point
1168// at N2 to point at N1.
1169static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
1170  std::swap(N1, N2);
1171  int NElts = M.size();
1172  for (int i = 0; i != NElts; ++i) {
1173    if (M[i] >= NElts)
1174      M[i] -= NElts;
1175    else if (M[i] >= 0)
1176      M[i] += NElts;
1177  }
1178}
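// For example (illustrative, 4-element shuffle): commuting the mask <0,5,2,7>
// gives <4,1,6,3>; indices that referred to the first input now refer to the
// second and vice versa, while negative (undef) entries are left untouched.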
1179
1180SDValue SelectionDAG::getVectorShuffle(EVT VT, DebugLoc dl, SDValue N1,
1181                                       SDValue N2, const int *Mask) {
1182  assert(N1.getValueType() == N2.getValueType() && "Invalid VECTOR_SHUFFLE");
1183  assert(VT.isVector() && N1.getValueType().isVector() &&
1184         "Vector Shuffle VTs must be a vectors");
1185  assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType()
1186         && "Vector Shuffle VTs must have same element type");
1187
1188  // Canonicalize shuffle undef, undef -> undef
1189  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
1190    return getUNDEF(VT);
1191
1192  // Validate that all indices in Mask are within the range of the elements
1193  // input to the shuffle.
1194  unsigned NElts = VT.getVectorNumElements();
1195  SmallVector<int, 8> MaskVec;
1196  for (unsigned i = 0; i != NElts; ++i) {
1197    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
1198    MaskVec.push_back(Mask[i]);
1199  }
1200
1201  // Canonicalize shuffle v, v -> v, undef
1202  if (N1 == N2) {
1203    N2 = getUNDEF(VT);
1204    for (unsigned i = 0; i != NElts; ++i)
1205      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
1206  }
1207
1208  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
1209  if (N1.getOpcode() == ISD::UNDEF)
1210    commuteShuffle(N1, N2, MaskVec);
1211
1212  // Canonicalize all indices into lhs -> shuffle lhs, undef
1213  // Canonicalize all indices into rhs -> shuffle rhs, undef
1214  bool AllLHS = true, AllRHS = true;
1215  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
1216  for (unsigned i = 0; i != NElts; ++i) {
1217    if (MaskVec[i] >= (int)NElts) {
1218      if (N2Undef)
1219        MaskVec[i] = -1;
1220      else
1221        AllLHS = false;
1222    } else if (MaskVec[i] >= 0) {
1223      AllRHS = false;
1224    }
1225  }
1226  if (AllLHS && AllRHS)
1227    return getUNDEF(VT);
1228  if (AllLHS && !N2Undef)
1229    N2 = getUNDEF(VT);
1230  if (AllRHS) {
1231    N1 = getUNDEF(VT);
1232    commuteShuffle(N1, N2, MaskVec);
1233  }
1234
1235  // If this is an identity shuffle, or all elements shuffle to undef, return that node.
1236  bool AllUndef = true;
1237  bool Identity = true;
1238  for (unsigned i = 0; i != NElts; ++i) {
1239    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
1240    if (MaskVec[i] >= 0) AllUndef = false;
1241  }
1242  if (Identity && NElts == N1.getValueType().getVectorNumElements())
1243    return N1;
1244  if (AllUndef)
1245    return getUNDEF(VT);
1246
1247  FoldingSetNodeID ID;
1248  SDValue Ops[2] = { N1, N2 };
1249  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
1250  for (unsigned i = 0; i != NElts; ++i)
1251    ID.AddInteger(MaskVec[i]);
1252
1253  void* IP = 0;
1254  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1255    return SDValue(E, 0);
1256
1257  // Allocate the mask array for the node out of the BumpPtrAllocator, since
1258  // SDNode doesn't have access to it.  This memory will be "leaked" when
1259  // the node is deallocated, but recovered when the OperandAllocator is released.
1260  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1261  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
1262
1263  ShuffleVectorSDNode *N = NodeAllocator.Allocate<ShuffleVectorSDNode>();
1264  new (N) ShuffleVectorSDNode(VT, dl, N1, N2, MaskAlloc);
1265  CSEMap.InsertNode(N, IP);
1266  AllNodes.push_back(N);
1267  return SDValue(N, 0);
1268}
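// Example of the canonicalizations above (illustrative): calling
//   getVectorShuffle(VT, dl, V, V, Mask)   // 4-element VT, Mask = <0,5,2,7>
// first rewrites the operands to (V, undef) with mask <0,1,2,3>, then
// recognizes the identity shuffle and simply returns V.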
1269
1270SDValue SelectionDAG::getConvertRndSat(EVT VT, DebugLoc dl,
1271                                       SDValue Val, SDValue DTy,
1272                                       SDValue STy, SDValue Rnd, SDValue Sat,
1273                                       ISD::CvtCode Code) {
1274  // If the src and dest types are the same and the conversion is between
1275  // integer types of the same sign or two floats, no conversion is necessary.
1276  if (DTy == STy &&
1277      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
1278    return Val;
1279
1280  FoldingSetNodeID ID;
1281  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
1282  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
1283  void* IP = 0;
1284  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1285    return SDValue(E, 0);
1286
1287  CvtRndSatSDNode *N = NodeAllocator.Allocate<CvtRndSatSDNode>();
1288  new (N) CvtRndSatSDNode(VT, dl, Ops, 5, Code);
1289  CSEMap.InsertNode(N, IP);
1290  AllNodes.push_back(N);
1291  return SDValue(N, 0);
1292}
1293
1294SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1295  FoldingSetNodeID ID;
1296  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
1297  ID.AddInteger(RegNo);
1298  void *IP = 0;
1299  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1300    return SDValue(E, 0);
1301
1302  SDNode *N = NodeAllocator.Allocate<RegisterSDNode>();
1303  new (N) RegisterSDNode(RegNo, VT);
1304  CSEMap.InsertNode(N, IP);
1305  AllNodes.push_back(N);
1306  return SDValue(N, 0);
1307}
1308
1309SDValue SelectionDAG::getLabel(unsigned Opcode, DebugLoc dl,
1310                               SDValue Root,
1311                               unsigned LabelID) {
1312  FoldingSetNodeID ID;
1313  SDValue Ops[] = { Root };
1314  AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), &Ops[0], 1);
1315  ID.AddInteger(LabelID);
1316  void *IP = 0;
1317  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1318    return SDValue(E, 0);
1319
1320  SDNode *N = NodeAllocator.Allocate<LabelSDNode>();
1321  new (N) LabelSDNode(Opcode, dl, Root, LabelID);
1322  CSEMap.InsertNode(N, IP);
1323  AllNodes.push_back(N);
1324  return SDValue(N, 0);
1325}
1326
1327SDValue SelectionDAG::getBlockAddress(BlockAddress *BA, EVT VT,
1328                                      bool isTarget,
1329                                      unsigned char TargetFlags) {
1330  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1331
1332  FoldingSetNodeID ID;
1333  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
1334  ID.AddPointer(BA);
1335  ID.AddInteger(TargetFlags);
1336  void *IP = 0;
1337  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1338    return SDValue(E, 0);
1339
1340  SDNode *N = NodeAllocator.Allocate<BlockAddressSDNode>();
1341  new (N) BlockAddressSDNode(Opc, VT, BA, TargetFlags);
1342  CSEMap.InsertNode(N, IP);
1343  AllNodes.push_back(N);
1344  return SDValue(N, 0);
1345}
1346
1347SDValue SelectionDAG::getSrcValue(const Value *V) {
1348  assert((!V || isa<PointerType>(V->getType())) &&
1349         "SrcValue is not a pointer?");
1350
1351  FoldingSetNodeID ID;
1352  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
1353  ID.AddPointer(V);
1354
1355  void *IP = 0;
1356  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
1357    return SDValue(E, 0);
1358
1359  SDNode *N = NodeAllocator.Allocate<SrcValueSDNode>();
1360  new (N) SrcValueSDNode(V);
1361  CSEMap.InsertNode(N, IP);
1362  AllNodes.push_back(N);
1363  return SDValue(N, 0);
1364}
1365
1366/// getShiftAmountOperand - Return the specified value cast to
1367/// the target's desired shift amount type.
1368SDValue SelectionDAG::getShiftAmountOperand(SDValue Op) {
1369  EVT OpTy = Op.getValueType();
1370  MVT ShTy = TLI.getShiftAmountTy();
1371  if (OpTy == ShTy || OpTy.isVector()) return Op;
1372
1373  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ?  ISD::TRUNCATE : ISD::ZERO_EXTEND;
1374  return getNode(Opcode, Op.getDebugLoc(), ShTy, Op);
1375}
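// Illustrative behaviour, assuming a target whose shift amount type is i8: an
// i32 shift amount is wrapped in (truncate i8 ...), an i1 amount in
// (zero_extend i8 ...), and i8 or vector-typed amounts are returned unchanged.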
1376
1377/// CreateStackTemporary - Create a stack temporary, suitable for holding the
1378/// specified value type.
1379SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1380  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1381  unsigned ByteSize = VT.getStoreSize();
1382  const Type *Ty = VT.getTypeForEVT(*getContext());
1383  unsigned StackAlign =
1384  std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);
1385
1386  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
1387  return getFrameIndex(FrameIdx, TLI.getPointerTy());
1388}
1389
1390/// CreateStackTemporary - Create a stack temporary suitable for holding
1391/// either of the specified value types.
1392SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1393  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
1394                            VT2.getStoreSizeInBits())/8;
1395  const Type *Ty1 = VT1.getTypeForEVT(*getContext());
1396  const Type *Ty2 = VT2.getTypeForEVT(*getContext());
1397  const TargetData *TD = TLI.getTargetData();
1398  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
1399                            TD->getPrefTypeAlignment(Ty2));
1400
1401  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
1402  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
1403  return getFrameIndex(FrameIdx, TLI.getPointerTy());
1404}
1405
1406SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
1407                                SDValue N2, ISD::CondCode Cond, DebugLoc dl) {
1408  // These setcc operations always fold.
1409  switch (Cond) {
1410  default: break;
1411  case ISD::SETFALSE:
1412  case ISD::SETFALSE2: return getConstant(0, VT);
1413  case ISD::SETTRUE:
1414  case ISD::SETTRUE2:  return getConstant(1, VT);
1415
1416  case ISD::SETOEQ:
1417  case ISD::SETOGT:
1418  case ISD::SETOGE:
1419  case ISD::SETOLT:
1420  case ISD::SETOLE:
1421  case ISD::SETONE:
1422  case ISD::SETO:
1423  case ISD::SETUO:
1424  case ISD::SETUEQ:
1425  case ISD::SETUNE:
1426    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1427    break;
1428  }
1429
1430  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
1431    const APInt &C2 = N2C->getAPIntValue();
1432    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1433      const APInt &C1 = N1C->getAPIntValue();
1434
1435      switch (Cond) {
1436      default: llvm_unreachable("Unknown integer setcc!");
1437      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
1438      case ISD::SETNE:  return getConstant(C1 != C2, VT);
1439      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
1440      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
1441      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
1442      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
1443      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
1444      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
1445      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
1446      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
1447      }
1448    }
1449  }
1450  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
1451    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
1452      // No compile time operations on this type yet.
1453      if (N1C->getValueType(0) == MVT::ppcf128)
1454        return SDValue();
1455
1456      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1457      switch (Cond) {
1458      default: break;
1459      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
1460                          return getUNDEF(VT);
1461                        // fall through
1462      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
1463      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
1464                          return getUNDEF(VT);
1465                        // fall through
1466      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
1467                                           R==APFloat::cmpLessThan, VT);
1468      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
1469                          return getUNDEF(VT);
1470                        // fall through
1471      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
1472      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
1473                          return getUNDEF(VT);
1474                        // fall through
1475      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
1476      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
1477                          return getUNDEF(VT);
1478                        // fall through
1479      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1480                                           R==APFloat::cmpEqual, VT);
1481      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
1482                          return getUNDEF(VT);
1483                        // fall through
1484      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
1485                                           R==APFloat::cmpEqual, VT);
1486      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
1487      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
1488      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
1489                                           R==APFloat::cmpEqual, VT);
1490      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
1491      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
1492                                           R==APFloat::cmpLessThan, VT);
1493      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
1494                                           R==APFloat::cmpUnordered, VT);
1495      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
1496      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
1497      }
1498    } else {
1499      // Ensure that the constant occurs on the RHS.
1500      return getSetCC(dl, VT, N2, N1, ISD::getSetCCSwappedOperands(Cond));
1501    }
1502  }
1503
1504  // Could not fold it.
1505  return SDValue();
1506}
1507
1508/// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
1509/// use this predicate to simplify operations downstream.
1510bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
1511  // This predicate is not safe for vector operations.
1512  if (Op.getValueType().isVector())
1513    return false;
1514
1515  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1516  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1517}
1518
1519/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
1520/// this predicate to simplify operations downstream.  Mask is known to be zero
1521/// for bits that V cannot have.
1522bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
1523                                     unsigned Depth) const {
1524  APInt KnownZero, KnownOne;
1525  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
1526  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1527  return (KnownZero & Mask) == Mask;
1528}
1529
1530/// ComputeMaskedBits - Determine which of the bits specified in Mask are
1531/// known to be either zero or one and return them in the KnownZero/KnownOne
1532/// bitsets.  This code only analyzes bits in Mask, in order to short-circuit
1533/// processing.
1534void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
1535                                     APInt &KnownZero, APInt &KnownOne,
1536                                     unsigned Depth) const {
1537  unsigned BitWidth = Mask.getBitWidth();
1538  assert(BitWidth == Op.getValueType().getScalarType().getSizeInBits() &&
1539         "Mask size mismatches value type size!");
1540
1541  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
1542  if (Depth == 6 || Mask == 0)
1543    return;  // Limit search depth, or bail out if no bits are demanded.
1544
1545  APInt KnownZero2, KnownOne2;
1546
1547  switch (Op.getOpcode()) {
1548  case ISD::Constant:
1549    // We know all of the bits for a constant!
1550    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & Mask;
1551    KnownZero = ~KnownOne & Mask;
1552    return;
1553  case ISD::AND:
1554    // If either the LHS or the RHS is zero, the result is zero.
1555    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
1556    ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownZero,
1557                      KnownZero2, KnownOne2, Depth+1);
1558    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1559    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1560
1561    // Output known-1 bits are only known if set in both the LHS & RHS.
1562    KnownOne &= KnownOne2;
1563    // Output known-0 bits are known to be clear if zero in either operand.
1564    KnownZero |= KnownZero2;
1565    return;
1566  case ISD::OR:
1567    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
1568    ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownOne,
1569                      KnownZero2, KnownOne2, Depth+1);
1570    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1571    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1572
1573    // Output known-0 bits are only known if clear in both the LHS & RHS.
1574    KnownZero &= KnownZero2;
1575    // Output known-1 bits are known to be set if set in either operand.
1576    KnownOne |= KnownOne2;
1577    return;
1578  case ISD::XOR: {
1579    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
1580    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
1581    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1582    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1583
1584    // Output known-0 bits are known if clear or set in both the LHS & RHS.
1585    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
1586    // Output known-1 bits are set if set in exactly one of the LHS, RHS.
1587    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
1588    KnownZero = KnownZeroOut;
1589    return;
1590  }
1591  case ISD::MUL: {
1592    APInt Mask2 = APInt::getAllOnesValue(BitWidth);
1593    ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero, KnownOne, Depth+1);
1594    ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
1595    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1596    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1597
1598    // If low bits are zero in either operand, output low known-0 bits.
1599    // Also compute a conservative estimate for high known-0 bits.
1600    // More trickiness is possible, but this is sufficient for the
1601    // interesting case of alignment computation.
1602    KnownOne.clear();
1603    unsigned TrailZ = KnownZero.countTrailingOnes() +
1604                      KnownZero2.countTrailingOnes();
1605    unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
1606                               KnownZero2.countLeadingOnes(),
1607                               BitWidth) - BitWidth;
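    // (If the operands have M and N leading zero bits, the full 2*BitWidth
    // product has at least M+N of them, so the truncated product has at
    // least M+N-BitWidth, which the max above clamps at zero.)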
1608
1609    TrailZ = std::min(TrailZ, BitWidth);
1610    LeadZ = std::min(LeadZ, BitWidth);
1611    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
1612                APInt::getHighBitsSet(BitWidth, LeadZ);
1613    KnownZero &= Mask;
1614    return;
1615  }
1616  case ISD::UDIV: {
1617    // For the purposes of computing leading zeros we can conservatively
1618    // treat a udiv as a logical right shift by the power of 2 known to
1619    // be less than the denominator.
1620    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
1621    ComputeMaskedBits(Op.getOperand(0),
1622                      AllOnes, KnownZero2, KnownOne2, Depth+1);
1623    unsigned LeadZ = KnownZero2.countLeadingOnes();
1624
1625    KnownOne2.clear();
1626    KnownZero2.clear();
1627    ComputeMaskedBits(Op.getOperand(1),
1628                      AllOnes, KnownZero2, KnownOne2, Depth+1);
1629    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
1630    if (RHSUnknownLeadingOnes != BitWidth)
1631      LeadZ = std::min(BitWidth,
1632                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
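    // (Some divisor bit is known one at position
    // BitWidth-RHSUnknownLeadingOnes-1, so the divisor is at least 2^(that
    // position) and the quotient gains at least that many leading zeros.)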
1633
1634    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
1635    return;
1636  }
1637  case ISD::SELECT:
1638    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
1639    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
1640    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1641    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1642
1643    // Only known if known in both the LHS and RHS.
1644    KnownOne &= KnownOne2;
1645    KnownZero &= KnownZero2;
1646    return;
1647  case ISD::SELECT_CC:
1648    ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1);
1649    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1);
1650    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1651    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1652
1653    // Only known if known in both the LHS and RHS.
1654    KnownOne &= KnownOne2;
1655    KnownZero &= KnownZero2;
1656    return;
1657  case ISD::SADDO:
1658  case ISD::UADDO:
1659  case ISD::SSUBO:
1660  case ISD::USUBO:
1661  case ISD::SMULO:
1662  case ISD::UMULO:
1663    if (Op.getResNo() != 1)
1664      return;
1665    // The boolean result conforms to getBooleanContents.  Fall through.
1666  case ISD::SETCC:
1667    // If we know the result of a setcc has the top bits zero, use this info.
1668    if (TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent &&
1669        BitWidth > 1)
1670      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1671    return;
1672  case ISD::SHL:
1673    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1674    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1675      unsigned ShAmt = SA->getZExtValue();
1676
1677      // If the shift count is an invalid immediate, don't do anything.
1678      if (ShAmt >= BitWidth)
1679        return;
1680
1681      ComputeMaskedBits(Op.getOperand(0), Mask.lshr(ShAmt),
1682                        KnownZero, KnownOne, Depth+1);
1683      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1684      KnownZero <<= ShAmt;
1685      KnownOne  <<= ShAmt;
1686      // low bits known zero.
1687      KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
1688    }
1689    return;
1690  case ISD::SRL:
1691    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1692    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1693      unsigned ShAmt = SA->getZExtValue();
1694
1695      // If the shift count is an invalid immediate, don't do anything.
1696      if (ShAmt >= BitWidth)
1697        return;
1698
1699      ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt),
1700                        KnownZero, KnownOne, Depth+1);
1701      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1702      KnownZero = KnownZero.lshr(ShAmt);
1703      KnownOne  = KnownOne.lshr(ShAmt);
1704
1705      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
1706      KnownZero |= HighBits;  // High bits known zero.
1707    }
1708    return;
1709  case ISD::SRA:
1710    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1711      unsigned ShAmt = SA->getZExtValue();
1712
1713      // If the shift count is an invalid immediate, don't do anything.
1714      if (ShAmt >= BitWidth)
1715        return;
1716
1717      APInt InDemandedMask = (Mask << ShAmt);
1718      // If any of the demanded bits are produced by the sign extension, we also
1719      // demand the input sign bit.
1720      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
1721      if (HighBits.getBoolValue())
1722        InDemandedMask |= APInt::getSignBit(BitWidth);
1723
1724      ComputeMaskedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne,
1725                        Depth+1);
1726      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1727      KnownZero = KnownZero.lshr(ShAmt);
1728      KnownOne  = KnownOne.lshr(ShAmt);
1729
1730      // Handle the sign bits.
1731      APInt SignBit = APInt::getSignBit(BitWidth);
1732      SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.
1733
1734      if (KnownZero.intersects(SignBit)) {
1735        KnownZero |= HighBits;  // New bits are known zero.
1736      } else if (KnownOne.intersects(SignBit)) {
1737        KnownOne  |= HighBits;  // New bits are known one.
1738      }
1739    }
1740    return;
1741  case ISD::SIGN_EXTEND_INREG: {
1742    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1743    unsigned EBits = EVT.getSizeInBits();
1744
1745    // Sign extension.  Compute the demanded bits in the result that are not
1746    // present in the input.
1747    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits) & Mask;
1748
1749    APInt InSignBit = APInt::getSignBit(EBits);
1750    APInt InputDemandedBits = Mask & APInt::getLowBitsSet(BitWidth, EBits);
1751
1752    // If the sign extended bits are demanded, we know that the sign
1753    // bit is demanded.
1754    InSignBit.zext(BitWidth);
1755    if (NewBits.getBoolValue())
1756      InputDemandedBits |= InSignBit;
1757
1758    ComputeMaskedBits(Op.getOperand(0), InputDemandedBits,
1759                      KnownZero, KnownOne, Depth+1);
1760    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1761
1762    // If the sign bit of the input is known set or clear, then we know the
1763    // top bits of the result.
1764    if (KnownZero.intersects(InSignBit)) {         // Input sign bit known clear
1765      KnownZero |= NewBits;
1766      KnownOne  &= ~NewBits;
1767    } else if (KnownOne.intersects(InSignBit)) {   // Input sign bit known set
1768      KnownOne  |= NewBits;
1769      KnownZero &= ~NewBits;
1770    } else {                              // Input sign bit unknown
1771      KnownZero &= ~NewBits;
1772      KnownOne  &= ~NewBits;
1773    }
1774    return;
1775  }
1776  case ISD::CTTZ:
1777  case ISD::CTLZ:
1778  case ISD::CTPOP: {
1779    unsigned LowBits = Log2_32(BitWidth)+1;
1780    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1781    KnownOne.clear();
1782    return;
1783  }
1784  case ISD::LOAD: {
1785    if (ISD::isZEXTLoad(Op.getNode())) {
1786      LoadSDNode *LD = cast<LoadSDNode>(Op);
1787      EVT VT = LD->getMemoryVT();
1788      unsigned MemBits = VT.getSizeInBits();
1789      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits) & Mask;
1790    }
1791    return;
1792  }
1793  case ISD::ZERO_EXTEND: {
1794    EVT InVT = Op.getOperand(0).getValueType();
1795    unsigned InBits = InVT.getScalarType().getSizeInBits();
1796    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
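    // Truncate the mask and the known-bit sets to the input width, compute
    // the input's known bits, then widen them again; the bits added by the
    // zero extension are known zero.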
1797    APInt InMask    = Mask;
1798    InMask.trunc(InBits);
1799    KnownZero.trunc(InBits);
1800    KnownOne.trunc(InBits);
1801    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
1802    KnownZero.zext(BitWidth);
1803    KnownOne.zext(BitWidth);
1804    KnownZero |= NewBits;
1805    return;
1806  }
1807  case ISD::SIGN_EXTEND: {
1808    EVT InVT = Op.getOperand(0).getValueType();
1809    unsigned InBits = InVT.getScalarType().getSizeInBits();
1810    APInt InSignBit = APInt::getSignBit(InBits);
1811    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
1812    APInt InMask = Mask;
1813    InMask.trunc(InBits);
1814
1815    // If any of the sign extended bits are demanded, we know that the sign
1816    // bit is demanded. Temporarily set this bit in the mask for our callee.
1817    if (NewBits.getBoolValue())
1818      InMask |= InSignBit;
1819
1820    KnownZero.trunc(InBits);
1821    KnownOne.trunc(InBits);
1822    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
1823
1824    // Note if the sign bit is known to be zero or one.
1825    bool SignBitKnownZero = KnownZero.isNegative();
1826    bool SignBitKnownOne  = KnownOne.isNegative();
1827    assert(!(SignBitKnownZero && SignBitKnownOne) &&
1828           "Sign bit can't be known to be both zero and one!");
1829
1830    // If the sign bit wasn't actually demanded by our caller, we don't
1831    // want it set in the KnownZero and KnownOne result values. Reset the
1832    // mask and reapply it to the result values.
1833    InMask = Mask;
1834    InMask.trunc(InBits);
1835    KnownZero &= InMask;
1836    KnownOne  &= InMask;
1837
1838    KnownZero.zext(BitWidth);
1839    KnownOne.zext(BitWidth);
1840
1841    // If the sign bit is known zero or one, the top bits match.
1842    if (SignBitKnownZero)
1843      KnownZero |= NewBits;
1844    else if (SignBitKnownOne)
1845      KnownOne  |= NewBits;
1846    return;
1847  }
1848  case ISD::ANY_EXTEND: {
1849    EVT InVT = Op.getOperand(0).getValueType();
1850    unsigned InBits = InVT.getScalarType().getSizeInBits();
1851    APInt InMask = Mask;
1852    InMask.trunc(InBits);
1853    KnownZero.trunc(InBits);
1854    KnownOne.trunc(InBits);
1855    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
1856    KnownZero.zext(BitWidth);
1857    KnownOne.zext(BitWidth);
1858    return;
1859  }
1860  case ISD::TRUNCATE: {
1861    EVT InVT = Op.getOperand(0).getValueType();
1862    unsigned InBits = InVT.getScalarType().getSizeInBits();
1863    APInt InMask = Mask;
1864    InMask.zext(InBits);
1865    KnownZero.zext(InBits);
1866    KnownOne.zext(InBits);
1867    ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
1868    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1869    KnownZero.trunc(BitWidth);
1870    KnownOne.trunc(BitWidth);
1871    break;
1872  }
1873  case ISD::AssertZext: {
1874    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1875    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
1876    ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
1877                      KnownOne, Depth+1);
1878    KnownZero |= (~InMask) & Mask;
1879    return;
1880  }
1881  case ISD::FGETSIGN:
1882    // All bits are zero except the low bit.
1883    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1884    return;
1885
1886  case ISD::SUB: {
1887    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
1888      // We know that the top bits of C-X are clear if X contains fewer bits
1889      // than C (i.e. no wrap-around can happen).  For example, 20-X is
1890      // positive if we can prove that X is >= 0 and < 16.
1891      if (CLHS->getAPIntValue().isNonNegative()) {
1892        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
1893        // NLZ can't be BitWidth: C is non-negative, so C+1 cannot be zero.
1894        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
1895        ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero2, KnownOne2,
1896                          Depth+1);
1897
1898        // If all of the MaskV bits are known to be zero, then we know the
1899        // output top bits are zero, because we now know that the output is
1900        // from [0-C].
1901        if ((KnownZero2 & MaskV) == MaskV) {
1902          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
1903          // Top bits known zero.
1904          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
1905        }
1906      }
1907    }
1908  }
1909  // fall through
1910  case ISD::ADD: {
1911    // Output low bits are known zero up to the number of trailing zero bits
1912    // common to both the LHS & RHS.  For example, 8+(X<<3) is known to have
1913    // the low 3 bits clear.
1914    APInt Mask2 = APInt::getLowBitsSet(BitWidth, Mask.countTrailingOnes());
1915    ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
1916    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1917    unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
1918
1919    ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero2, KnownOne2, Depth+1);
1920    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1921    KnownZeroOut = std::min(KnownZeroOut,
1922                            KnownZero2.countTrailingOnes());
1923
1924    KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
1925    return;
1926  }
1927  case ISD::SREM:
1928    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1929      const APInt &RA = Rem->getAPIntValue();
1930      if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
1931        APInt LowBits = RA.isStrictlyPositive() ? (RA - 1) : ~RA;
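        // LowBits covers the magnitude of the remainder: for a divisor of
        // +/-2^k the result's magnitude is less than 2^k, so any bits above
        // LowBits are copies of the result's sign.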
1932        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
1933        ComputeMaskedBits(Op.getOperand(0), Mask2,KnownZero2,KnownOne2,Depth+1);
1934
1935        // If the sign bit of the first operand is zero, the sign bit of
1936        // the result is zero. If the first operand has no one bits below
1937        // the second operand's single 1 bit, its sign will be zero.
1938        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
1939          KnownZero2 |= ~LowBits;
1940
1941        KnownZero |= KnownZero2 & Mask;
1942
1943        assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
1944      }
1945    }
1946    return;
1947  case ISD::UREM: {
1948    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1949      const APInt &RA = Rem->getAPIntValue();
1950      if (RA.isPowerOf2()) {
1951        APInt LowBits = (RA - 1);
1952        APInt Mask2 = LowBits & Mask;
1953        KnownZero |= ~LowBits & Mask;
1954        ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero, KnownOne,Depth+1);
1955        assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
1956        break;
1957      }
1958    }
1959
1960    // Since the result is less than or equal to either operand, any leading
1961    // zero bits in either operand must also exist in the result.
1962    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
1963    ComputeMaskedBits(Op.getOperand(0), AllOnes, KnownZero, KnownOne,
1964                      Depth+1);
1965    ComputeMaskedBits(Op.getOperand(1), AllOnes, KnownZero2, KnownOne2,
1966                      Depth+1);
1967
1968    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
1969                                KnownZero2.countLeadingOnes());
1970    KnownOne.clear();
1971    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
1972    return;
1973  }
1974  default:
1975    // Allow the target to implement this method for its nodes.
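    // (The intrinsic case labels below jump into the body of this 'if', so
    // intrinsics always reach the target hook regardless of their opcode.)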
1976    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
1977  case ISD::INTRINSIC_WO_CHAIN:
1978  case ISD::INTRINSIC_W_CHAIN:
1979  case ISD::INTRINSIC_VOID:
1980      TLI.computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne, *this,
1981                                         Depth);
1982    }
1983    return;
1984  }
1985}
1986
1987/// ComputeNumSignBits - Return the number of times the sign bit of the
1988/// register is replicated into the other bits.  We know that at least 1 bit
1989/// is always equal to the sign bit (itself), but other cases can give us
1990/// information.  For example, immediately after an "SRA X, 2", we know that
1991/// the top 3 bits are all equal to each other, so we return 3.
1992unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
1993  EVT VT = Op.getValueType();
1994  assert(VT.isInteger() && "Invalid VT!");
1995  unsigned VTBits = VT.getScalarType().getSizeInBits();
1996  unsigned Tmp, Tmp2;
1997  unsigned FirstAnswer = 1;
1998
1999  if (Depth == 6)
2000    return 1;  // Limit search depth.
2001
2002  switch (Op.getOpcode()) {
2003  default: break;
2004  case ISD::AssertSext:
2005    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2006    return VTBits-Tmp+1;
2007  case ISD::AssertZext:
2008    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2009    return VTBits-Tmp;
2010
2011  case ISD::Constant: {
2012    const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2013    // If negative, return # leading ones.
2014    if (Val.isNegative())
2015      return Val.countLeadingOnes();
2016
2017    // Return # leading zeros.
2018    return Val.countLeadingZeros();
2019  }
2020
2021  case ISD::SIGN_EXTEND:
2022    Tmp = VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2023    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2024
2025  case ISD::SIGN_EXTEND_INREG:
2026    // Max of the input and what this extends.
2027    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2028    Tmp = VTBits-Tmp+1;
2029
2030    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2031    return std::max(Tmp, Tmp2);
2032
2033  case ISD::SRA:
2034    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2035    // SRA X, C   -> adds C sign bits.
2036    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2037      Tmp += C->getZExtValue();
2038      if (Tmp > VTBits) Tmp = VTBits;
2039    }
2040    return Tmp;
2041  case ISD::SHL:
2042    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2043      // shl destroys sign bits.
2044      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2045      if (C->getZExtValue() >= VTBits ||      // Bad shift.
2046          C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
2047      return Tmp - C->getZExtValue();
2048    }
2049    break;
2050  case ISD::AND:
2051  case ISD::OR:
2052  case ISD::XOR:    // NOT is handled here.
2053    // Logical binary ops preserve the number of sign bits at the worst.
2054    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2055    if (Tmp != 1) {
2056      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2057      FirstAnswer = std::min(Tmp, Tmp2);
2058      // We computed what we know about the sign bits as our first
2059      // answer. Now proceed to the generic code that uses
2060      // ComputeMaskedBits, and pick whichever answer is better.
2061    }
2062    break;
2063
2064  case ISD::SELECT:
2065    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2066    if (Tmp == 1) return 1;  // Early out.
2067    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2068    return std::min(Tmp, Tmp2);
2069
2070  case ISD::SADDO:
2071  case ISD::UADDO:
2072  case ISD::SSUBO:
2073  case ISD::USUBO:
2074  case ISD::SMULO:
2075  case ISD::UMULO:
2076    if (Op.getResNo() != 1)
2077      break;
2078    // The boolean result conforms to getBooleanContents.  Fall through.
2079  case ISD::SETCC:
2080    // If setcc returns 0/-1, all bits are sign bits.
2081    if (TLI.getBooleanContents() ==
2082        TargetLowering::ZeroOrNegativeOneBooleanContent)
2083      return VTBits;
2084    break;
2085  case ISD::ROTL:
2086  case ISD::ROTR:
2087    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2088      unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2089
2090      // Handle rotate right by N like a rotate left by 32-N.
2091      if (Op.getOpcode() == ISD::ROTR)
2092        RotAmt = (VTBits-RotAmt) & (VTBits-1);
2093
2094      // If we aren't rotating out all of the known-in sign bits, return the
2095      // number that are left.  This handles rotl(sext(x), 1) for example.
2096      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2097      if (Tmp > RotAmt+1) return Tmp-RotAmt;
2098    }
2099    break;
2100  case ISD::ADD:
2101    // Add can have at most one carry bit.  Thus we know that the output
2102    // is, at worst, one more bit than the inputs.
2103    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2104    if (Tmp == 1) return 1;  // Early out.
2105
2106    // Special case decrementing a value (ADD X, -1):
2107    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2108      if (CRHS->isAllOnesValue()) {
2109        APInt KnownZero, KnownOne;
2110        APInt Mask = APInt::getAllOnesValue(VTBits);
2111        ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
2112
2113        // If the input is known to be 0 or 1, the output is 0/-1, which is all
2114        // sign bits set.
2115        if ((KnownZero | APInt(VTBits, 1)) == Mask)
2116          return VTBits;
2117
2118        // If we are subtracting one from a positive number, there is no carry
2119        // out of the result.
2120        if (KnownZero.isNegative())
2121          return Tmp;
2122      }
2123
2124    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2125    if (Tmp2 == 1) return 1;
2126    return std::min(Tmp, Tmp2)-1;
2127    break;
2128
2129  case ISD::SUB:
2130    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2131    if (Tmp2 == 1) return 1;
2132
2133    // Handle NEG.
2134    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2135      if (CLHS->isNullValue()) {
2136        APInt KnownZero, KnownOne;
2137        APInt Mask = APInt::getAllOnesValue(VTBits);
2138        ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
2139        // If the input is known to be 0 or 1, the output is 0/-1, which is all
2140        // sign bits set.
2141        if ((KnownZero | APInt(VTBits, 1)) == Mask)
2142          return VTBits;
2143
2144        // If the input is known to be positive (the sign bit is known clear),
2145        // the output of the NEG has the same number of sign bits as the input.
2146        if (KnownZero.isNegative())
2147          return Tmp2;
2148
2149        // Otherwise, we treat this like a SUB.
2150      }
2151
2152    // Sub can have at most one carry bit.  Thus we know that the output
2153    // is, at worst, one more bit than the inputs.
2154    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2155    if (Tmp == 1) return 1;  // Early out.
2156    return std::min(Tmp, Tmp2)-1;
2157    break;
2158  case ISD::TRUNCATE:
2159    // FIXME: it's tricky to do anything useful for this, but it is an important
2160    // case for targets like X86.
2161    break;
2162  }
2163
2164  // Handle LOADX separately here. EXTLOAD case will fallthrough.
2165  if (Op.getOpcode() == ISD::LOAD) {
2166    LoadSDNode *LD = cast<LoadSDNode>(Op);
2167    unsigned ExtType = LD->getExtensionType();
2168    switch (ExtType) {
2169    default: break;
2170    case ISD::SEXTLOAD:    // e.g. i16 -> i32: '17' bits known
2171      Tmp = LD->getMemoryVT().getSizeInBits();
2172      return VTBits-Tmp+1;
2173    case ISD::ZEXTLOAD:    // e.g. i16 -> i32: '16' bits known
2174      Tmp = LD->getMemoryVT().getSizeInBits();
2175      return VTBits-Tmp;
2176    }
2177  }
2178
2179  // Allow the target to implement this method for its nodes.
2180  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2181      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2182      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2183      Op.getOpcode() == ISD::INTRINSIC_VOID) {
2184    unsigned NumBits = TLI.ComputeNumSignBitsForTargetNode(Op, Depth);
2185    if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2186  }
2187
2188  // Finally, if we can prove that the top bits of the result are 0's or 1's,
2189  // use this information.
2190  APInt KnownZero, KnownOne;
2191  APInt Mask = APInt::getAllOnesValue(VTBits);
2192  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
2193
2194  if (KnownZero.isNegative()) {        // sign bit is 0
2195    Mask = KnownZero;
2196  } else if (KnownOne.isNegative()) {  // sign bit is 1
2197    Mask = KnownOne;
2198  } else {
2199    // Nothing known.
2200    return FirstAnswer;
2201  }
2202
2203  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
2204  // the number of identical bits in the top of the input value.
2205  Mask = ~Mask;
2206  Mask <<= Mask.getBitWidth()-VTBits;
2207  // Return # leading zeros.  We use 'min' here in case Mask was zero before
2208  // shifting.  We don't want to return '64' as for an i32 "0".
2209  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
2210}
2211
2212bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2213  // If we're told that NaNs won't happen, assume they won't.
2214  if (FiniteOnlyFPMath())
2215    return true;
2216
2217  // If the value is a constant, we can obviously see if it is a NaN or not.
2218  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2219    return !C->getValueAPF().isNaN();
2220
2221  // TODO: Recognize more cases here.
2222
2223  return false;
2224}
2225
2226bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const {
2227  GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
2228  if (!GA) return false;
2229  if (GA->getOffset() != 0) return false;
2230  GlobalVariable *GV = dyn_cast<GlobalVariable>(GA->getGlobal());
2231  if (!GV) return false;
2232  MachineModuleInfo *MMI = getMachineModuleInfo();
2233  return MMI && MMI->hasDebugInfo();
2234}
2235
2236
2237/// getShuffleScalarElt - Returns the scalar element that will make up the ith
2238/// element of the result of the vector shuffle.
2239SDValue SelectionDAG::getShuffleScalarElt(const ShuffleVectorSDNode *N,
2240                                          unsigned i) {
2241  EVT VT = N->getValueType(0);
2242  DebugLoc dl = N->getDebugLoc();
2243  if (N->getMaskElt(i) < 0)
2244    return getUNDEF(VT.getVectorElementType());
2245  unsigned Index = N->getMaskElt(i);
2246  unsigned NumElems = VT.getVectorNumElements();
2247  SDValue V = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
2248  Index %= NumElems;
2249
2250  if (V.getOpcode() == ISD::BIT_CONVERT) {
2251    V = V.getOperand(0);
2252    EVT VVT = V.getValueType();
2253    if (!VVT.isVector() || VVT.getVectorNumElements() != (unsigned)NumElems)
2254      return SDValue();
2255  }
2256  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
2257    return (Index == 0) ? V.getOperand(0)
2258                      : getUNDEF(VT.getVectorElementType());
2259  if (V.getOpcode() == ISD::BUILD_VECTOR)
2260    return V.getOperand(Index);
2261  if (const ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(V))
2262    return getShuffleScalarElt(SVN, Index);
2263  return SDValue();
2264}
2265
2266
2267/// getNode - Gets or creates the specified node.
2268///
2269SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT) {
2270  FoldingSetNodeID ID;
2271  AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
2272  void *IP = 0;
2273  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2274    return SDValue(E, 0);
2275
2276  SDNode *N = NodeAllocator.Allocate<SDNode>();
2277  new (N) SDNode(Opcode, DL, getVTList(VT));
2278  CSEMap.InsertNode(N, IP);
2279
2280  AllNodes.push_back(N);
2281#ifndef NDEBUG
2282  VerifyNode(N);
2283#endif
2284  return SDValue(N, 0);
2285}
2286
2287SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
2288                              EVT VT, SDValue Operand) {
2289  // Constant fold unary operations with an integer constant operand.
2290  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2291    const APInt &Val = C->getAPIntValue();
2292    unsigned BitWidth = VT.getSizeInBits();
2293    switch (Opcode) {
2294    default: break;
2295    case ISD::SIGN_EXTEND:
2296      return getConstant(APInt(Val).sextOrTrunc(BitWidth), VT);
2297    case ISD::ANY_EXTEND:
2298    case ISD::ZERO_EXTEND:
2299    case ISD::TRUNCATE:
2300      return getConstant(APInt(Val).zextOrTrunc(BitWidth), VT);
2301    case ISD::UINT_TO_FP:
2302    case ISD::SINT_TO_FP: {
2303      const uint64_t zero[] = {0, 0};
2304      // No compile time operations on this type.
2305      if (VT==MVT::ppcf128)
2306        break;
2307      APFloat apf = APFloat(APInt(BitWidth, 2, zero));
2308      (void)apf.convertFromAPInt(Val,
2309                                 Opcode==ISD::SINT_TO_FP,
2310                                 APFloat::rmNearestTiesToEven);
2311      return getConstantFP(apf, VT);
2312    }
2313    case ISD::BIT_CONVERT:
2314      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2315        return getConstantFP(Val.bitsToFloat(), VT);
2316      else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2317        return getConstantFP(Val.bitsToDouble(), VT);
2318      break;
2319    case ISD::BSWAP:
2320      return getConstant(Val.byteSwap(), VT);
2321    case ISD::CTPOP:
2322      return getConstant(Val.countPopulation(), VT);
2323    case ISD::CTLZ:
2324      return getConstant(Val.countLeadingZeros(), VT);
2325    case ISD::CTTZ:
2326      return getConstant(Val.countTrailingZeros(), VT);
2327    }
2328  }
2329
2330  // Constant fold unary operations with a floating point constant operand.
2331  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2332    APFloat V = C->getValueAPF();    // make copy
2333    if (VT != MVT::ppcf128 && Operand.getValueType() != MVT::ppcf128) {
2334      switch (Opcode) {
2335      case ISD::FNEG:
2336        V.changeSign();
2337        return getConstantFP(V, VT);
2338      case ISD::FABS:
2339        V.clearSign();
2340        return getConstantFP(V, VT);
2341      case ISD::FP_ROUND:
2342      case ISD::FP_EXTEND: {
2343        bool ignored;
2344        // This can return overflow, underflow, or inexact; we don't care.
2345        // FIXME need to be more flexible about rounding mode.
2346        (void)V.convert(*EVTToAPFloatSemantics(VT),
2347                        APFloat::rmNearestTiesToEven, &ignored);
2348        return getConstantFP(V, VT);
2349      }
2350      case ISD::FP_TO_SINT:
2351      case ISD::FP_TO_UINT: {
2352        integerPart x[2];
2353        bool ignored;
2354        assert(integerPartWidth >= 64);
2355        // FIXME need to be more flexible about rounding mode.
2356        APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2357                              Opcode==ISD::FP_TO_SINT,
2358                              APFloat::rmTowardZero, &ignored);
2359        if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
2360          break;
2361        APInt api(VT.getSizeInBits(), 2, x);
2362        return getConstant(api, VT);
2363      }
2364      case ISD::BIT_CONVERT:
2365        if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2366          return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2367        else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2368          return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
2369        break;
2370      }
2371    }
2372  }
2373
2374  unsigned OpOpcode = Operand.getNode()->getOpcode();
2375  switch (Opcode) {
2376  case ISD::TokenFactor:
2377  case ISD::MERGE_VALUES:
2378  case ISD::CONCAT_VECTORS:
2379    return Operand;         // Factor, merge or concat of one node?  No need.
2380  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
2381  case ISD::FP_EXTEND:
2382    assert(VT.isFloatingPoint() &&
2383           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2384    if (Operand.getValueType() == VT) return Operand;  // noop conversion.
2385    assert((!VT.isVector() ||
2386            VT.getVectorNumElements() ==
2387            Operand.getValueType().getVectorNumElements()) &&
2388           "Vector element count mismatch!");
2389    if (Operand.getOpcode() == ISD::UNDEF)
2390      return getUNDEF(VT);
2391    break;
2392  case ISD::SIGN_EXTEND:
2393    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2394           "Invalid SIGN_EXTEND!");
2395    if (Operand.getValueType() == VT) return Operand;   // noop extension
2396    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2397           "Invalid sext node, dst < src!");
2398    assert((!VT.isVector() ||
2399            VT.getVectorNumElements() ==
2400            Operand.getValueType().getVectorNumElements()) &&
2401           "Vector element count mismatch!");
2402    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2403      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2404    break;
2405  case ISD::ZERO_EXTEND:
2406    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2407           "Invalid ZERO_EXTEND!");
2408    if (Operand.getValueType() == VT) return Operand;   // noop extension
2409    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2410           "Invalid zext node, dst < src!");
2411    assert((!VT.isVector() ||
2412            VT.getVectorNumElements() ==
2413            Operand.getValueType().getVectorNumElements()) &&
2414           "Vector element count mismatch!");
2415    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
2416      return getNode(ISD::ZERO_EXTEND, DL, VT,
2417                     Operand.getNode()->getOperand(0));
2418    break;
2419  case ISD::ANY_EXTEND:
2420    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2421           "Invalid ANY_EXTEND!");
2422    if (Operand.getValueType() == VT) return Operand;   // noop extension
2423    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2424           "Invalid anyext node, dst < src!");
2425    assert((!VT.isVector() ||
2426            VT.getVectorNumElements() ==
2427            Operand.getValueType().getVectorNumElements()) &&
2428           "Vector element count mismatch!");
2429    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND)
2430      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
2431      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2432    break;
2433  case ISD::TRUNCATE:
2434    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2435           "Invalid TRUNCATE!");
2436    if (Operand.getValueType() == VT) return Operand;   // noop truncate
2437    assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
2438           "Invalid truncate node, src < dst!");
2439    assert((!VT.isVector() ||
2440            VT.getVectorNumElements() ==
2441            Operand.getValueType().getVectorNumElements()) &&
2442           "Vector element count mismatch!");
2443    if (OpOpcode == ISD::TRUNCATE)
2444      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2445    else if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2446             OpOpcode == ISD::ANY_EXTEND) {
2447      // If the source is smaller than the dest, we still need an extend.
2448      if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
2449            .bitsLT(VT.getScalarType()))
2450        return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2451      else if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2452        return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2453      else
2454        return Operand.getNode()->getOperand(0);
2455    }
2456    break;
2457  case ISD::BIT_CONVERT:
2458    // Basic sanity checking.
2459    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2460           && "Cannot BIT_CONVERT between types of different sizes!");
2461    if (VT == Operand.getValueType()) return Operand;  // noop conversion.
2462    if (OpOpcode == ISD::BIT_CONVERT)  // bitconv(bitconv(x)) -> bitconv(x)
2463      return getNode(ISD::BIT_CONVERT, DL, VT, Operand.getOperand(0));
2464    if (OpOpcode == ISD::UNDEF)
2465      return getUNDEF(VT);
2466    break;
2467  case ISD::SCALAR_TO_VECTOR:
2468    assert(VT.isVector() && !Operand.getValueType().isVector() &&
2469           (VT.getVectorElementType() == Operand.getValueType() ||
2470            (VT.getVectorElementType().isInteger() &&
2471             Operand.getValueType().isInteger() &&
2472             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
2473           "Illegal SCALAR_TO_VECTOR node!");
2474    if (OpOpcode == ISD::UNDEF)
2475      return getUNDEF(VT);
2476    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
2477    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
2478        isa<ConstantSDNode>(Operand.getOperand(1)) &&
2479        Operand.getConstantOperandVal(1) == 0 &&
2480        Operand.getOperand(0).getValueType() == VT)
2481      return Operand.getOperand(0);
2482    break;
2483  case ISD::FNEG:
2484    // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
2485    if (UnsafeFPMath && OpOpcode == ISD::FSUB)
2486      return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
2487                     Operand.getNode()->getOperand(0));
2488    if (OpOpcode == ISD::FNEG)  // --X -> X
2489      return Operand.getNode()->getOperand(0);
2490    break;
2491  case ISD::FABS:
2492    if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
2493      return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
2494    break;
2495  }
2496
2497  SDNode *N;
2498  SDVTList VTs = getVTList(VT);
2499  if (VT != MVT::Flag) { // Don't CSE flag producing nodes
2500    FoldingSetNodeID ID;
2501    SDValue Ops[1] = { Operand };
2502    AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
2503    void *IP = 0;
2504    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2505      return SDValue(E, 0);
2506
2507    N = NodeAllocator.Allocate<UnarySDNode>();
2508    new (N) UnarySDNode(Opcode, DL, VTs, Operand);
2509    CSEMap.InsertNode(N, IP);
2510  } else {
2511    N = NodeAllocator.Allocate<UnarySDNode>();
2512    new (N) UnarySDNode(Opcode, DL, VTs, Operand);
2513  }
2514
2515  AllNodes.push_back(N);
2516#ifndef NDEBUG
2517  VerifyNode(N);
2518#endif
2519  return SDValue(N, 0);
2520}
2521
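/// FoldConstantArithmetic - Constant fold a binary integer operation on the
/// two given constants.  Returns a null SDValue if the opcode is not handled
/// or the operation cannot be folded (e.g. division or remainder by zero).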
2522SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode,
2523                                             EVT VT,
2524                                             ConstantSDNode *Cst1,
2525                                             ConstantSDNode *Cst2) {
2526  const APInt &C1 = Cst1->getAPIntValue(), &C2 = Cst2->getAPIntValue();
2527
2528  switch (Opcode) {
2529  case ISD::ADD:  return getConstant(C1 + C2, VT);
2530  case ISD::SUB:  return getConstant(C1 - C2, VT);
2531  case ISD::MUL:  return getConstant(C1 * C2, VT);
2532  case ISD::UDIV:
2533    if (C2.getBoolValue()) return getConstant(C1.udiv(C2), VT);
2534    break;
2535  case ISD::UREM:
2536    if (C2.getBoolValue()) return getConstant(C1.urem(C2), VT);
2537    break;
2538  case ISD::SDIV:
2539    if (C2.getBoolValue()) return getConstant(C1.sdiv(C2), VT);
2540    break;
2541  case ISD::SREM:
2542    if (C2.getBoolValue()) return getConstant(C1.srem(C2), VT);
2543    break;
2544  case ISD::AND:  return getConstant(C1 & C2, VT);
2545  case ISD::OR:   return getConstant(C1 | C2, VT);
2546  case ISD::XOR:  return getConstant(C1 ^ C2, VT);
2547  case ISD::SHL:  return getConstant(C1 << C2, VT);
2548  case ISD::SRL:  return getConstant(C1.lshr(C2), VT);
2549  case ISD::SRA:  return getConstant(C1.ashr(C2), VT);
2550  case ISD::ROTL: return getConstant(C1.rotl(C2), VT);
2551  case ISD::ROTR: return getConstant(C1.rotr(C2), VT);
2552  default: break;
2553  }
2554
2555  return SDValue();
2556}
2557
2558SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
2559                              SDValue N1, SDValue N2) {
2560  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
2561  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
2562  switch (Opcode) {
2563  default: break;
2564  case ISD::TokenFactor:
2565    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
2566           N2.getValueType() == MVT::Other && "Invalid token factor!");
2567    // Fold trivial token factors.
2568    if (N1.getOpcode() == ISD::EntryToken) return N2;
2569    if (N2.getOpcode() == ISD::EntryToken) return N1;
2570    if (N1 == N2) return N1;
2571    break;
2572  case ISD::CONCAT_VECTORS:
2573    // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
2574    // one big BUILD_VECTOR.
2575    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
2576        N2.getOpcode() == ISD::BUILD_VECTOR) {
2577      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
2578      Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
2579      return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
2580    }
2581    break;
2582  case ISD::AND:
2583    assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
2584           N1.getValueType() == VT && "Binary operator types must match!");
2585    // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
2586    // worth handling here.
2587    if (N2C && N2C->isNullValue())
2588      return N2;
2589    if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
2590      return N1;
2591    break;
2592  case ISD::OR:
2593  case ISD::XOR:
2594  case ISD::ADD:
2595  case ISD::SUB:
2596    assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
2597           N1.getValueType() == VT && "Binary operator types must match!");
2598    // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
2599    // it's worth handling here.
2600    if (N2C && N2C->isNullValue())
2601      return N1;
2602    break;
2603  case ISD::UDIV:
2604  case ISD::UREM:
2605  case ISD::MULHU:
2606  case ISD::MULHS:
2607  case ISD::MUL:
2608  case ISD::SDIV:
2609  case ISD::SREM:
2610    assert(VT.isInteger() && "This operator does not apply to FP types!");
2611    // fall through
2612  case ISD::FADD:
2613  case ISD::FSUB:
2614  case ISD::FMUL:
2615  case ISD::FDIV:
2616  case ISD::FREM:
2617    if (UnsafeFPMath) {
2618      if (Opcode == ISD::FADD) {
2619        // 0+x --> x
2620        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
2621          if (CFP->getValueAPF().isZero())
2622            return N2;
2623        // x+0 --> x
2624        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2625          if (CFP->getValueAPF().isZero())
2626            return N1;
2627      } else if (Opcode == ISD::FSUB) {
2628        // x-0 --> x
2629        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2630          if (CFP->getValueAPF().isZero())
2631            return N1;
2632      }
2633    }
2634    assert(N1.getValueType() == N2.getValueType() &&
2635           N1.getValueType() == VT && "Binary operator types must match!");
2636    break;
2637  case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
2638    assert(N1.getValueType() == VT &&
2639           N1.getValueType().isFloatingPoint() &&
2640           N2.getValueType().isFloatingPoint() &&
2641           "Invalid FCOPYSIGN!");
2642    break;
2643  case ISD::SHL:
2644  case ISD::SRA:
2645  case ISD::SRL:
2646  case ISD::ROTL:
2647  case ISD::ROTR:
2648    assert(VT == N1.getValueType() &&
2649           "Shift operators return type must be the same as their first arg");
2650    assert(VT.isInteger() && N2.getValueType().isInteger() &&
2651           "Shifts only work on integers");
2652
2653    // Always fold shifts of i1 values so the code generator doesn't need to
2654    // handle them.  Since we know the shift amount has to be less than the
2655    // bit width of the value, the shift/rotate count is guaranteed to be zero.
2656    if (VT == MVT::i1)
2657      return N1;
2658    break;
2659  case ISD::FP_ROUND_INREG: {
2660    EVT EVT = cast<VTSDNode>(N2)->getVT();
2661    assert(VT == N1.getValueType() && "Not an inreg round!");
2662    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
2663           "Cannot FP_ROUND_INREG integer types");
2664    assert(EVT.bitsLE(VT) && "Not rounding down!");
2665    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
2666    break;
2667  }
2668  case ISD::FP_ROUND:
2669    assert(VT.isFloatingPoint() &&
2670           N1.getValueType().isFloatingPoint() &&
2671           VT.bitsLE(N1.getValueType()) &&
2672           isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
2673    if (N1.getValueType() == VT) return N1;  // noop conversion.
2674    break;
2675  case ISD::AssertSext:
2676  case ISD::AssertZext: {
2677    EVT EVT = cast<VTSDNode>(N2)->getVT();
2678    assert(VT == N1.getValueType() && "Not an inreg extend!");
2679    assert(VT.isInteger() && EVT.isInteger() &&
2680           "Cannot *_EXTEND_INREG FP types");
2681    assert(!EVT.isVector() &&
2682           "AssertSExt/AssertZExt type should be the vector element type "
2683           "rather than the vector type!");
2684    assert(EVT.bitsLE(VT) && "Not extending!");
2685    if (VT == EVT) return N1; // noop assertion.
2686    break;
2687  }
2688  case ISD::SIGN_EXTEND_INREG: {
2689    EVT EVT = cast<VTSDNode>(N2)->getVT();
2690    assert(VT == N1.getValueType() && "Not an inreg extend!");
2691    assert(VT.isInteger() && EVT.isInteger() &&
2692           "Cannot *_EXTEND_INREG FP types");
2693    assert(!EVT.isVector() &&
2694           "SIGN_EXTEND_INREG type should be the vector element type rather "
2695           "than the vector type!");
2696    assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
2697    if (EVT == VT) return N1;  // Not actually extending
2698
2699    if (N1C) {
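      // Constant fold: shift the value up so bit FromBits-1 becomes the sign
      // bit, then arithmetic-shift back down to replicate it into the high
      // bits.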
2700      APInt Val = N1C->getAPIntValue();
2701      unsigned FromBits = EVT.getSizeInBits();
2702      Val <<= Val.getBitWidth()-FromBits;
2703      Val = Val.ashr(Val.getBitWidth()-FromBits);
2704      return getConstant(Val, VT);
2705    }
2706    break;
2707  }
2708  case ISD::EXTRACT_VECTOR_ELT:
2709    // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
2710    if (N1.getOpcode() == ISD::UNDEF)
2711      return getUNDEF(VT);
2712
2713    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
2714    // expanding copies of large vectors from registers.
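    // e.g. extracting element 5 from a concatenation of two v4i32 vectors
    // becomes an extract of element 1 from the second source vector.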
2715    if (N2C &&
2716        N1.getOpcode() == ISD::CONCAT_VECTORS &&
2717        N1.getNumOperands() > 0) {
2718      unsigned Factor =
2719        N1.getOperand(0).getValueType().getVectorNumElements();
2720      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
2721                     N1.getOperand(N2C->getZExtValue() / Factor),
2722                     getConstant(N2C->getZExtValue() % Factor,
2723                                 N2.getValueType()));
2724    }
2725
2726    // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
2727    // expanding large vector constants.
2728    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
2729      SDValue Elt = N1.getOperand(N2C->getZExtValue());
2730      EVT VEltTy = N1.getValueType().getVectorElementType();
2731      if (Elt.getValueType() != VEltTy) {
2732        // If the vector element type is not legal, the BUILD_VECTOR operands
2733        // are promoted and implicitly truncated.  Make that explicit here.
2734        Elt = getNode(ISD::TRUNCATE, DL, VEltTy, Elt);
2735      }
2736      if (VT != VEltTy) {
2737        // If the vector element type is not legal, the EXTRACT_VECTOR_ELT
2738        // result is implicitly extended.
2739        Elt = getNode(ISD::ANY_EXTEND, DL, VT, Elt);
2740      }
2741      return Elt;
2742    }
2743
2744    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
2745    // operations are lowered to scalars.
2746    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
2747      // If the indices are the same, return the inserted element.
2748      if (N1.getOperand(2) == N2)
2749        return N1.getOperand(1);
2750      // If the indices are known different, extract the element from
2751      // the original vector.
2752      else if (isa<ConstantSDNode>(N1.getOperand(2)) &&
2753               isa<ConstantSDNode>(N2))
2754        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
2755    }
2756    break;
2757  case ISD::EXTRACT_ELEMENT:
2758    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
2759    assert(!N1.getValueType().isVector() && !VT.isVector() &&
2760           (N1.getValueType().isInteger() == VT.isInteger()) &&
2761           "Wrong types for EXTRACT_ELEMENT!");
2762
2763    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
2764    // 64-bit integers into 32-bit parts.  Instead of building the extract of
2765    // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
2766    if (N1.getOpcode() == ISD::BUILD_PAIR)
2767      return N1.getOperand(N2C->getZExtValue());
2768
2769    // EXTRACT_ELEMENT of a constant int is also very common.
2770    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
2771      unsigned ElementSize = VT.getSizeInBits();
2772      unsigned Shift = ElementSize * N2C->getZExtValue();
2773      APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
2774      return getConstant(ShiftedVal.trunc(ElementSize), VT);
2775    }
2776    break;
2777  case ISD::EXTRACT_SUBVECTOR:
2778    if (N1.getValueType() == VT) // Trivial extraction.
2779      return N1;
2780    break;
2781  }
2782
2783  if (N1C) {
2784    if (N2C) {
2785      SDValue SV = FoldConstantArithmetic(Opcode, VT, N1C, N2C);
2786      if (SV.getNode()) return SV;
2787    } else {      // Canonicalize constant to RHS if commutative
2788      if (isCommutativeBinOp(Opcode)) {
2789        std::swap(N1C, N2C);
2790        std::swap(N1, N2);
2791      }
2792    }
2793  }
2794
2795  // Constant fold FP operations.
2796  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
2797  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
2798  if (N1CFP) {
2799    if (!N2CFP && isCommutativeBinOp(Opcode)) {
2800      // Canonicalize constant to RHS if commutative
2801      std::swap(N1CFP, N2CFP);
2802      std::swap(N1, N2);
2803    } else if (N2CFP && VT != MVT::ppcf128) {
2804      APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
2805      APFloat::opStatus s;
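          // Each fold checks the APFloat status and gives up if the operation
          // signalled an invalid operation (or, for FDIV/FREM, divide-by-zero),
          // so those cases are left to be evaluated at run time.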
2806      switch (Opcode) {
2807      case ISD::FADD:
2808        s = V1.add(V2, APFloat::rmNearestTiesToEven);
2809        if (s != APFloat::opInvalidOp)
2810          return getConstantFP(V1, VT);
2811        break;
2812      case ISD::FSUB:
2813        s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
2814        if (s != APFloat::opInvalidOp)
2815          return getConstantFP(V1, VT);
2816        break;
2817      case ISD::FMUL:
2818        s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
2819        if (s != APFloat::opInvalidOp)
2820          return getConstantFP(V1, VT);
2821        break;
2822      case ISD::FDIV:
2823        s = V1.divide(V2, APFloat::rmNearestTiesToEven);
2824        if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
2825          return getConstantFP(V1, VT);
2826        break;
2827      case ISD::FREM:
2828        s = V1.mod(V2, APFloat::rmNearestTiesToEven);
2829        if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
2830          return getConstantFP(V1, VT);
2831        break;
2832      case ISD::FCOPYSIGN:
2833        V1.copySign(V2);
2834        return getConstantFP(V1, VT);
2835      default: break;
2836      }
2837    }
2838  }
2839
2840  // Canonicalize an UNDEF to the RHS, even over a constant.
2841  if (N1.getOpcode() == ISD::UNDEF) {
2842    if (isCommutativeBinOp(Opcode)) {
2843      std::swap(N1, N2);
2844    } else {
2845      switch (Opcode) {
2846      case ISD::FP_ROUND_INREG:
2847      case ISD::SIGN_EXTEND_INREG:
2848      case ISD::SUB:
2849      case ISD::FSUB:
2850      case ISD::FDIV:
2851      case ISD::FREM:
2852      case ISD::SRA:
2853        return N1;     // fold op(undef, arg2) -> undef
2854      case ISD::UDIV:
2855      case ISD::SDIV:
2856      case ISD::UREM:
2857      case ISD::SREM:
2858      case ISD::SRL:
2859      case ISD::SHL:
2860        if (!VT.isVector())
2861          return getConstant(0, VT);    // fold op(undef, arg2) -> 0
2862        // For vectors, we can't easily build an all-zeros vector; just return
2863        // the other operand.
2864        return N2;
2865      }
2866    }
2867  }
2868
2869  // Fold a bunch of operators when the RHS is undef.
2870  if (N2.getOpcode() == ISD::UNDEF) {
2871    switch (Opcode) {
2872    case ISD::XOR:
2873      if (N1.getOpcode() == ISD::UNDEF)
2874        // Handle undef ^ undef -> 0 special case. This is a common
2875        // idiom (misuse).
2876        return getConstant(0, VT);
2877      // fallthrough
2878    case ISD::ADD:
2879    case ISD::ADDC:
2880    case ISD::ADDE:
2881    case ISD::SUB:
2882    case ISD::UDIV:
2883    case ISD::SDIV:
2884    case ISD::UREM:
2885    case ISD::SREM:
2886      return N2;       // fold op(arg1, undef) -> undef
2887    case ISD::FADD:
2888    case ISD::FSUB:
2889    case ISD::FMUL:
2890    case ISD::FDIV:
2891    case ISD::FREM:
2892      if (UnsafeFPMath)
2893        return N2;
2894      break;
2895    case ISD::MUL:
2896    case ISD::AND:
2897    case ISD::SRL:
2898    case ISD::SHL:
2899      if (!VT.isVector())
2900        return getConstant(0, VT);  // fold op(arg1, undef) -> 0
2901      // For vectors, we can't easily build an all-zeros vector, just return
2902      // the LHS.
2903      return N1;
2904    case ISD::OR:
2905      if (!VT.isVector())
2906        return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
2907      // For vectors, we can't easily build an all-ones vector, just return
2908      // the LHS.
2909      return N1;
2910    case ISD::SRA:
2911      return N1;
2912    }
2913  }
2914
2915  // Memoize this node if possible.
2916  SDNode *N;
2917  SDVTList VTs = getVTList(VT);
2918  if (VT != MVT::Flag) {
2919    SDValue Ops[] = { N1, N2 };
2920    FoldingSetNodeID ID;
2921    AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
2922    void *IP = 0;
2923    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2924      return SDValue(E, 0);
2925
2926    N = NodeAllocator.Allocate<BinarySDNode>();
2927    new (N) BinarySDNode(Opcode, DL, VTs, N1, N2);
2928    CSEMap.InsertNode(N, IP);
2929  } else {
2930    N = NodeAllocator.Allocate<BinarySDNode>();
2931    new (N) BinarySDNode(Opcode, DL, VTs, N1, N2);
2932  }
2933
2934  AllNodes.push_back(N);
2935#ifndef NDEBUG
2936  VerifyNode(N);
2937#endif
2938  return SDValue(N, 0);
2939}
2940
2941SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
2942                              SDValue N1, SDValue N2, SDValue N3) {
2943  // Perform various simplifications.
2944  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
2945  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
2946  switch (Opcode) {
2947  case ISD::CONCAT_VECTORS:
2948    // A CONCAT_VECTORS whose operands are all BUILD_VECTORs can be
2949    // simplified to one big BUILD_VECTOR.
2950    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
2951        N2.getOpcode() == ISD::BUILD_VECTOR &&
2952        N3.getOpcode() == ISD::BUILD_VECTOR) {
2953      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
2954      Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
2955      Elts.insert(Elts.end(), N3.getNode()->op_begin(), N3.getNode()->op_end());
2956      return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
2957    }
2958    break;
2959  case ISD::SETCC: {
2960    // Use FoldSetCC to simplify SETCC's.
2961    SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
2962    if (Simp.getNode()) return Simp;
2963    break;
2964  }
2965  case ISD::SELECT:
2966    if (N1C) {
2967      if (N1C->getZExtValue())
2968        return N2;             // select true, X, Y -> X
2969      else
2970        return N3;             // select false, X, Y -> Y
2971    }
2972
2973    if (N2 == N3) return N2;   // select C, X, X -> X
2974    break;
2975  case ISD::BRCOND:
2976    if (N2C) {
2977      if (N2C->getZExtValue()) // Unconditional branch
2978        return getNode(ISD::BR, DL, MVT::Other, N1, N3);
2979      else
2980        return N1;         // Never-taken branch
2981    }
2982    break;
2983  case ISD::VECTOR_SHUFFLE:
2984    llvm_unreachable("should use getVectorShuffle constructor!");
2985    break;
2986  case ISD::BIT_CONVERT:
2987    // Fold bit_convert nodes from a type to themselves.
2988    if (N1.getValueType() == VT)
2989      return N1;
2990    break;
2991  }
2992
2993  // Memoize node if it doesn't produce a flag.
2994  SDNode *N;
2995  SDVTList VTs = getVTList(VT);
2996  if (VT != MVT::Flag) {
2997    SDValue Ops[] = { N1, N2, N3 };
2998    FoldingSetNodeID ID;
2999    AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3000    void *IP = 0;
3001    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3002      return SDValue(E, 0);
3003
3004    N = NodeAllocator.Allocate<TernarySDNode>();
3005    new (N) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
3006    CSEMap.InsertNode(N, IP);
3007  } else {
3008    N = NodeAllocator.Allocate<TernarySDNode>();
3009    new (N) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
3010  }
3011
3012  AllNodes.push_back(N);
3013#ifndef NDEBUG
3014  VerifyNode(N);
3015#endif
3016  return SDValue(N, 0);
3017}
3018
3019SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
3020                              SDValue N1, SDValue N2, SDValue N3,
3021                              SDValue N4) {
3022  SDValue Ops[] = { N1, N2, N3, N4 };
3023  return getNode(Opcode, DL, VT, Ops, 4);
3024}
3025
3026SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
3027                              SDValue N1, SDValue N2, SDValue N3,
3028                              SDValue N4, SDValue N5) {
3029  SDValue Ops[] = { N1, N2, N3, N4, N5 };
3030  return getNode(Opcode, DL, VT, Ops, 5);
3031}
3032
3033/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3034/// the incoming stack arguments to be loaded from the stack.
3035SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3036  SmallVector<SDValue, 8> ArgChains;
3037
3038  // Include the original chain at the beginning of the list. When this is
3039  // used by target LowerCall hooks, this helps legalize find the
3040  // CALLSEQ_BEGIN node.
3041  ArgChains.push_back(Chain);
3042
3043  // Add a chain value for each stack argument.
3044  for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3045       UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3046    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3047      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
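            // Incoming argument slots are fixed stack objects, which have negative
            // frame indices; only loads from those are added as chain dependencies.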
3048        if (FI->getIndex() < 0)
3049          ArgChains.push_back(SDValue(L, 1));
3050
3051  // Build a tokenfactor for all the chains.
3052  return getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
3053                 &ArgChains[0], ArgChains.size());
3054}
3055
3056/// getMemsetValue - Build the value to store for a memset: the low byte of
3057/// the value operand replicated to fill the given type.
3058static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3059                              DebugLoc dl) {
3060  unsigned NumBits = VT.isVector() ?
3061    VT.getVectorElementType().getSizeInBits() : VT.getSizeInBits();
3062  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3063    APInt Val = APInt(NumBits, C->getZExtValue() & 255);
3064    unsigned Shift = 8;
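        // Splat the low byte across the value by repeated doubling: e.g. for a
        // 32-bit type and byte value 0xAB, Val becomes 0xABAB and then 0xABABABAB.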
3065    for (unsigned i = NumBits; i > 8; i >>= 1) {
3066      Val = (Val << Shift) | Val;
3067      Shift <<= 1;
3068    }
3069    if (VT.isInteger())
3070      return DAG.getConstant(Val, VT);
3071    return DAG.getConstantFP(APFloat(Val), VT);
3072  }
3073
3074  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3075  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3076  unsigned Shift = 8;
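      // Same byte-doubling trick as above, but built out of DAG nodes for a
      // non-constant value: OR the value with itself shifted left by 8, 16, ...
      // until the byte fills the whole type.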
3077  for (unsigned i = NumBits; i > 8; i >>= 1) {
3078    Value = DAG.getNode(ISD::OR, dl, VT,
3079                        DAG.getNode(ISD::SHL, dl, VT, Value,
3080                                    DAG.getConstant(Shift,
3081                                                    TLI.getShiftAmountTy())),
3082                        Value);
3083    Shift <<= 1;
3084  }
3085
3086  return Value;
3087}
3088
3089/// getMemsetStringVal - Similar to getMemsetValue, except this is only used
3090/// when a memcpy is turned into a memset because the source is a constant
3091/// string pointer.
3092static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
3093                                  const TargetLowering &TLI,
3094                                  std::string &Str, unsigned Offset) {
3095  // Handle vector with all elements zero.
3096  if (Str.empty()) {
3097    if (VT.isInteger())
3098      return DAG.getConstant(0, VT);
3099    unsigned NumElts = VT.getVectorNumElements();
3100    MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3101    return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
3102                       DAG.getConstant(0,
3103                       EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts)));
3104  }
3105
3106  assert(!VT.isVector() && "Can't handle vector type here!");
3107  unsigned NumBits = VT.getSizeInBits();
3108  unsigned MSB = NumBits / 8;
3109  uint64_t Val = 0;
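      // Pack the chunk's bytes into Val most-significant byte first. On a
      // little-endian target we start at the chunk's last byte and walk backwards,
      // so the constant matches what a native load of those bytes would produce.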
3110  if (TLI.isLittleEndian())
3111    Offset = Offset + MSB - 1;
3112  for (unsigned i = 0; i != MSB; ++i) {
3113    Val = (Val << 8) | (unsigned char)Str[Offset];
3114    Offset += TLI.isLittleEndian() ? -1 : 1;
3115  }
3116  return DAG.getConstant(Val, VT);
3117}
3118
3119/// getMemBasePlusOffset - Returns Base + Offset as a node with the same
3120/// value type as Base.
3121static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset,
3122                                      SelectionDAG &DAG) {
3123  EVT VT = Base.getValueType();
3124  return DAG.getNode(ISD::ADD, Base.getDebugLoc(),
3125                     VT, Base, DAG.getConstant(Offset, VT));
3126}
3127
3128/// isMemSrcFromString - Returns true if memcpy source is a string constant.
3129///
3130static bool isMemSrcFromString(SDValue Src, std::string &Str) {
3131  unsigned SrcDelta = 0;
3132  GlobalAddressSDNode *G = NULL;
3133  if (Src.getOpcode() == ISD::GlobalAddress)
3134    G = cast<GlobalAddressSDNode>(Src);
3135  else if (Src.getOpcode() == ISD::ADD &&
3136           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3137           Src.getOperand(1).getOpcode() == ISD::Constant) {
3138    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3139    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3140  }
3141  if (!G)
3142    return false;
3143
3144  GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
3145  if (GV && GetConstantStringInfo(GV, Str, SrcDelta, false))
3146    return true;
3147
3148  return false;
3149}
3150
3151/// MeetsMaxMemopRequirement - Determines if the number of memory ops required
3152/// to replace the memset / memcpy is below the threshold. It also returns the
3153/// types of the sequence of memory ops to perform memset / memcpy.
3154static
3155bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
3156                              SDValue Dst, SDValue Src,
3157                              unsigned Limit, uint64_t Size, unsigned &Align,
3158                              std::string &Str, bool &isSrcStr,
3159                              SelectionDAG &DAG,
3160                              const TargetLowering &TLI) {
3161  isSrcStr = isMemSrcFromString(Src, Str);
3162  bool isSrcConst = isa<ConstantSDNode>(Src);
3163  EVT VT = TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr, DAG);
3164  bool AllowUnalign = TLI.allowsUnalignedMemoryAccesses(VT);
3165  if (VT != MVT::iAny) {
3166    const Type *Ty = VT.getTypeForEVT(*DAG.getContext());
3167    unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
3168    // If source is a string constant, this will require an unaligned load.
3169    if (NewAlign > Align && (isSrcConst || AllowUnalign)) {
3170      if (Dst.getOpcode() != ISD::FrameIndex) {
3171        // Can't change destination alignment. It requires an unaligned store.
3172        if (AllowUnalign)
3173          VT = MVT::iAny;
3174      } else {
3175        int FI = cast<FrameIndexSDNode>(Dst)->getIndex();
3176        MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
3177        if (MFI->isFixedObjectIndex(FI)) {
3178          // Can't change destination alignment. It requires an unaligned store.
3179          if (AllowUnalign)
3180            VT = MVT::iAny;
3181        } else {
3182          // Give the stack frame object a larger alignment if needed.
3183          if (MFI->getObjectAlignment(FI) < NewAlign)
3184            MFI->setObjectAlignment(FI, NewAlign);
3185          Align = NewAlign;
3186        }
3187      }
3188    }
3189  }
3190
3191  if (VT == MVT::iAny) {
3192    if (TLI.allowsUnalignedMemoryAccesses(MVT::i64)) {
3193      VT = MVT::i64;
3194    } else {
3195      switch (Align & 7) {
3196      case 0:  VT = MVT::i64; break;
3197      case 4:  VT = MVT::i32; break;
3198      case 2:  VT = MVT::i16; break;
3199      default: VT = MVT::i8;  break;
3200      }
3201    }
3202
3203    MVT LVT = MVT::i64;
3204    while (!TLI.isTypeLegal(LVT))
3205      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3206    assert(LVT.isInteger());
3207
3208    if (VT.bitsGT(LVT))
3209      VT = LVT;
3210  }
3211
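      // Greedily decompose Size into the widest ops that still fit, narrowing the
      // type for the remainder; for example, Size == 7 starting from VT == i32
      // yields the sequence { i32, i16, i8 }.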
3212  unsigned NumMemOps = 0;
3213  while (Size != 0) {
3214    unsigned VTSize = VT.getSizeInBits() / 8;
3215    while (VTSize > Size) {
3216      // For now, only use non-vector loads / stores for the left-over pieces.
3217      if (VT.isVector()) {
3218        VT = MVT::i64;
3219        while (!TLI.isTypeLegal(VT))
3220          VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
3221        VTSize = VT.getSizeInBits() / 8;
3222      } else {
3223        // This can result in a type that is not legal on the target, e.g.
3224        // 1 or 2 bytes on PPC.
3225        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
3226        VTSize >>= 1;
3227      }
3228    }
3229
3230    if (++NumMemOps > Limit)
3231      return false;
3232    MemOps.push_back(VT);
3233    Size -= VTSize;
3234  }
3235
3236  return true;
3237}
3238
3239static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
3240                                         SDValue Chain, SDValue Dst,
3241                                         SDValue Src, uint64_t Size,
3242                                         unsigned Align, bool AlwaysInline,
3243                                         const Value *DstSV, uint64_t DstSVOff,
3244                                         const Value *SrcSV, uint64_t SrcSVOff){
3245  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3246
3247  // Expand memcpy to a series of load and store ops if the size operand falls
3248  // below a certain threshold.
3249  std::vector<EVT> MemOps;
3250  uint64_t Limit = -1ULL;
3251  if (!AlwaysInline)
3252    Limit = TLI.getMaxStoresPerMemcpy();
3253  unsigned DstAlign = Align;  // Destination alignment can change.
3254  std::string Str;
3255  bool CopyFromStr;
3256  if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign,
3257                                Str, CopyFromStr, DAG, TLI))
3258    return SDValue();
3259
3260
3261  bool isZeroStr = CopyFromStr && Str.empty();
3262  SmallVector<SDValue, 8> OutChains;
3263  unsigned NumMemOps = MemOps.size();
3264  uint64_t SrcOff = 0, DstOff = 0;
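      // Emit one store per entry in MemOps: either a materialized constant (for a
      // constant-string source) or a load/store pair, advancing the offsets by the
      // width of each op.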
3265  for (unsigned i = 0; i != NumMemOps; ++i) {
3266    EVT VT = MemOps[i];
3267    unsigned VTSize = VT.getSizeInBits() / 8;
3268    SDValue Value, Store;
3269
3270    if (CopyFromStr && (isZeroStr || !VT.isVector())) {
3271      // It's unlikely a store of a vector immediate can be done in a single
3272      // instruction. It would require a load from a constant pool first.
3273      // We also handle storing a vector of all zeros.
3274      // FIXME: Handle other cases where store of vector immediate is done in
3275      // a single instruction.
3276      Value = getMemsetStringVal(VT, dl, DAG, TLI, Str, SrcOff);
3277      Store = DAG.getStore(Chain, dl, Value,
3278                           getMemBasePlusOffset(Dst, DstOff, DAG),
3279                           DstSV, DstSVOff + DstOff, false, DstAlign);
3280    } else {
3281      // The type might not be legal for the target.  This should only happen
3282      // if the type is smaller than a legal type, as on PPC, so the right
3283      // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
3284      // to Load/Store if NVT==VT.
3285      // FIXME: does the case above also need this?
3286      EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3287      assert(NVT.bitsGE(VT));
3288      Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3289                             getMemBasePlusOffset(Src, SrcOff, DAG),
3290                             SrcSV, SrcSVOff + SrcOff, VT, false, Align);
3291      Store = DAG.getTruncStore(Chain, dl, Value,
3292                             getMemBasePlusOffset(Dst, DstOff, DAG),
3293                             DstSV, DstSVOff + DstOff, VT, false, DstAlign);
3294    }
3295    OutChains.push_back(Store);
3296    SrcOff += VTSize;
3297    DstOff += VTSize;
3298  }
3299
3300  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3301                     &OutChains[0], OutChains.size());
3302}
3303
3304static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
3305                                          SDValue Chain, SDValue Dst,
3306                                          SDValue Src, uint64_t Size,
3307                                          unsigned Align, bool AlwaysInline,
3308                                          const Value *DstSV, uint64_t DstSVOff,
3309                                          const Value *SrcSV, uint64_t SrcSVOff){
3310  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3311
3312  // Expand memmove to a series of load and store ops if the size operand falls
3313  // below a certain threshold.
3314  std::vector<EVT> MemOps;
3315  uint64_t Limit = -1ULL;
3316  if (!AlwaysInline)
3317    Limit = TLI.getMaxStoresPerMemmove();
3318  unsigned DstAlign = Align;  // Destination alignment can change.
3319  std::string Str;
3320  bool CopyFromStr;
3321  if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign,
3322                                Str, CopyFromStr, DAG, TLI))
3323    return SDValue();
3324
3325  uint64_t SrcOff = 0, DstOff = 0;
3326
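      // Unlike the memcpy expansion above, issue all of the loads first and merge
      // their chains with a TokenFactor before emitting any stores; memmove must
      // remain correct when the source and destination ranges overlap.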
3327  SmallVector<SDValue, 8> LoadValues;
3328  SmallVector<SDValue, 8> LoadChains;
3329  SmallVector<SDValue, 8> OutChains;
3330  unsigned NumMemOps = MemOps.size();
3331  for (unsigned i = 0; i < NumMemOps; i++) {
3332    EVT VT = MemOps[i];
3333    unsigned VTSize = VT.getSizeInBits() / 8;
3334    SDValue Value, Store;
3335
3336    Value = DAG.getLoad(VT, dl, Chain,
3337                        getMemBasePlusOffset(Src, SrcOff, DAG),
3338                        SrcSV, SrcSVOff + SrcOff, false, Align);
3339    LoadValues.push_back(Value);
3340    LoadChains.push_back(Value.getValue(1));
3341    SrcOff += VTSize;
3342  }
3343  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3344                      &LoadChains[0], LoadChains.size());
3345  OutChains.clear();
3346  for (unsigned i = 0; i < NumMemOps; i++) {
3347    EVT VT = MemOps[i];
3348    unsigned VTSize = VT.getSizeInBits() / 8;
3349    SDValue Value, Store;
3350
3351    Store = DAG.getStore(Chain, dl, LoadValues[i],
3352                         getMemBasePlusOffset(Dst, DstOff, DAG),
3353                         DstSV, DstSVOff + DstOff, false, DstAlign);
3354    OutChains.push_back(Store);
3355    DstOff += VTSize;
3356  }
3357
3358  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3359                     &OutChains[0], OutChains.size());
3360}
3361
3362static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
3363                                 SDValue Chain, SDValue Dst,
3364                                 SDValue Src, uint64_t Size,
3365                                 unsigned Align,
3366                                 const Value *DstSV, uint64_t DstSVOff) {
3367  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3368
3369  // Expand memset to a series of store ops if the size operand
3370  // falls below a certain threshold.
3371  std::vector<EVT> MemOps;
3372  std::string Str;
3373  bool CopyFromStr;
3374  if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, TLI.getMaxStoresPerMemset(),
3375                                Size, Align, Str, CopyFromStr, DAG, TLI))
3376    return SDValue();
3377
3378  SmallVector<SDValue, 8> OutChains;
3379  uint64_t DstOff = 0;
3380
3381  unsigned NumMemOps = MemOps.size();
3382  for (unsigned i = 0; i < NumMemOps; i++) {
3383    EVT VT = MemOps[i];
3384    unsigned VTSize = VT.getSizeInBits() / 8;
3385    SDValue Value = getMemsetValue(Src, VT, DAG, dl);
3386    SDValue Store = DAG.getStore(Chain, dl, Value,
3387                                 getMemBasePlusOffset(Dst, DstOff, DAG),
3388                                 DstSV, DstSVOff + DstOff);
3389    OutChains.push_back(Store);
3390    DstOff += VTSize;
3391  }
3392
3393  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3394                     &OutChains[0], OutChains.size());
3395}
3396
3397SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
3398                                SDValue Src, SDValue Size,
3399                                unsigned Align, bool AlwaysInline,
3400                                const Value *DstSV, uint64_t DstSVOff,
3401                                const Value *SrcSV, uint64_t SrcSVOff) {
3402
3403  // Check to see if we should lower the memcpy to loads and stores first.
3404  // For cases within the target-specified limits, this is the best choice.
3405  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3406  if (ConstantSize) {
3407    // Memcpy with size zero? Just return the original chain.
3408    if (ConstantSize->isNullValue())
3409      return Chain;
3410
3411    SDValue Result =
3412      getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3413                              ConstantSize->getZExtValue(),
3414                              Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
3415    if (Result.getNode())
3416      return Result;
3417  }
3418
3419  // Then check to see if we should lower the memcpy with target-specific
3420  // code. If the target chooses to do this, this is the next best.
3421  SDValue Result =
3422    TLI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
3423                                AlwaysInline,
3424                                DstSV, DstSVOff, SrcSV, SrcSVOff);
3425  if (Result.getNode())
3426    return Result;
3427
3428  // If we really need inline code and the target declined to provide it,
3429  // use a (potentially long) sequence of loads and stores.
3430  if (AlwaysInline) {
3431    assert(ConstantSize && "AlwaysInline requires a constant size!");
3432    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3433                                   ConstantSize->getZExtValue(), Align, true,
3434                                   DstSV, DstSVOff, SrcSV, SrcSVOff);
3435  }
3436
3437  // Emit a library call.
3438  TargetLowering::ArgListTy Args;
3439  TargetLowering::ArgListEntry Entry;
3440  Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
3441  Entry.Node = Dst; Args.push_back(Entry);
3442  Entry.Node = Src; Args.push_back(Entry);
3443  Entry.Node = Size; Args.push_back(Entry);
3444  // FIXME: pass in DebugLoc
3445  std::pair<SDValue,SDValue> CallResult =
3446    TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
3447                    false, false, false, false, 0,
3448                    TLI.getLibcallCallingConv(RTLIB::MEMCPY), false,
3449                    /*isReturnValueUsed=*/false,
3450                    getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY),
3451                                      TLI.getPointerTy()),
3452                    Args, *this, dl);
3453  return CallResult.second;
3454}
3455
3456SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
3457                                 SDValue Src, SDValue Size,
3458                                 unsigned Align,
3459                                 const Value *DstSV, uint64_t DstSVOff,
3460                                 const Value *SrcSV, uint64_t SrcSVOff) {
3461
3462  // Check to see if we should lower the memmove to loads and stores first.
3463  // For cases within the target-specified limits, this is the best choice.
3464  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3465  if (ConstantSize) {
3466    // Memmove with size zero? Just return the original chain.
3467    if (ConstantSize->isNullValue())
3468      return Chain;
3469
3470    SDValue Result =
3471      getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
3472                               ConstantSize->getZExtValue(),
3473                               Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
3474    if (Result.getNode())
3475      return Result;
3476  }
3477
3478  // Then check to see if we should lower the memmove with target-specific
3479  // code. If the target chooses to do this, this is the next best.
3480  SDValue Result =
3481    TLI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align,
3482                                 DstSV, DstSVOff, SrcSV, SrcSVOff);
3483  if (Result.getNode())
3484    return Result;
3485
3486  // Emit a library call.
3487  TargetLowering::ArgListTy Args;
3488  TargetLowering::ArgListEntry Entry;
3489  Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
3490  Entry.Node = Dst; Args.push_back(Entry);
3491  Entry.Node = Src; Args.push_back(Entry);
3492  Entry.Node = Size; Args.push_back(Entry);
3493  // FIXME: pass in DebugLoc
3494  std::pair<SDValue,SDValue> CallResult =
3495    TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
3496                    false, false, false, false, 0,
3497                    TLI.getLibcallCallingConv(RTLIB::MEMMOVE), false,
3498                    /*isReturnValueUsed=*/false,
3499                    getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE),
3500                                      TLI.getPointerTy()),
3501                    Args, *this, dl);
3502  return CallResult.second;
3503}
3504
3505SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
3506                                SDValue Src, SDValue Size,
3507                                unsigned Align,
3508                                const Value *DstSV, uint64_t DstSVOff) {
3509
3510  // Check to see if we should lower the memset to stores first.
3511  // For cases within the target-specified limits, this is the best choice.
3512  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3513  if (ConstantSize) {
3514    // Memset with size zero? Just return the original chain.
3515    if (ConstantSize->isNullValue())
3516      return Chain;
3517
3518    SDValue Result =
3519      getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
3520                      Align, DstSV, DstSVOff);
3521    if (Result.getNode())
3522      return Result;
3523  }
3524
3525  // Then check to see if we should lower the memset with target-specific
3526  // code. If the target chooses to do this, this is the next best.
3527  SDValue Result =
3528    TLI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align,
3529                                DstSV, DstSVOff);
3530  if (Result.getNode())
3531    return Result;
3532
3533  // Emit a library call.
3534  const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
3535  TargetLowering::ArgListTy Args;
3536  TargetLowering::ArgListEntry Entry;
3537  Entry.Node = Dst; Entry.Ty = IntPtrTy;
3538  Args.push_back(Entry);
3539  // Extend or truncate the argument to be an i32 value for the call.
3540  if (Src.getValueType().bitsGT(MVT::i32))
3541    Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
3542  else
3543    Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
3544  Entry.Node = Src;
3545  Entry.Ty = Type::getInt32Ty(*getContext());
3546  Entry.isSExt = true;
3547  Args.push_back(Entry);
3548  Entry.Node = Size;
3549  Entry.Ty = IntPtrTy;
3550  Entry.isSExt = false;
3551  Args.push_back(Entry);
3552  // FIXME: pass in DebugLoc
3553  std::pair<SDValue,SDValue> CallResult =
3554    TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
3555                    false, false, false, false, 0,
3556                    TLI.getLibcallCallingConv(RTLIB::MEMSET), false,
3557                    /*isReturnValueUsed=*/false,
3558                    getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
3559                                      TLI.getPointerTy()),
3560                    Args, *this, dl);
3561  return CallResult.second;
3562}
3563
3564SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
3565                                SDValue Chain,
3566                                SDValue Ptr, SDValue Cmp,
3567                                SDValue Swp, const Value* PtrVal,
3568                                unsigned Alignment) {
3569  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3570    Alignment = getEVTAlignment(MemVT);
3571
3572  // Check if the memory reference references a frame index
3573  if (!PtrVal)
3574    if (const FrameIndexSDNode *FI =
3575          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
3576      PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
3577
3578  MachineFunction &MF = getMachineFunction();
3579  unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
3580
3581  // For now, atomics are always considered to be volatile.
3582  Flags |= MachineMemOperand::MOVolatile;
3583
3584  MachineMemOperand *MMO =
3585    MF.getMachineMemOperand(PtrVal, Flags, 0,
3586                            MemVT.getStoreSize(), Alignment);
3587
3588  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO);
3589}
3590
3591SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
3592                                SDValue Chain,
3593                                SDValue Ptr, SDValue Cmp,
3594                                SDValue Swp, MachineMemOperand *MMO) {
3595  assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
3596  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
3597
3598  EVT VT = Cmp.getValueType();
3599
3600  SDVTList VTs = getVTList(VT, MVT::Other);
3601  FoldingSetNodeID ID;
3602  ID.AddInteger(MemVT.getRawBits());
3603  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
3604  AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
3605  void* IP = 0;
3606  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
3607    cast<AtomicSDNode>(E)->refineAlignment(MMO);
3608    return SDValue(E, 0);
3609  }
3610  SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
3611  new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Cmp, Swp, MMO);
3612  CSEMap.InsertNode(N, IP);
3613  AllNodes.push_back(N);
3614  return SDValue(N, 0);
3615}
3616
3617SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
3618                                SDValue Chain,
3619                                SDValue Ptr, SDValue Val,
3620                                const Value* PtrVal,
3621                                unsigned Alignment) {
3622  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3623    Alignment = getEVTAlignment(MemVT);
3624
3625  // Check if the memory reference references a frame index
3626  if (!PtrVal)
3627    if (const FrameIndexSDNode *FI =
3628          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
3629      PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
3630
3631  MachineFunction &MF = getMachineFunction();
3632  unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
3633
3634  // For now, atomics are always considered to be volatile.
3635  Flags |= MachineMemOperand::MOVolatile;
3636
3637  MachineMemOperand *MMO =
3638    MF.getMachineMemOperand(PtrVal, Flags, 0,
3639                            MemVT.getStoreSize(), Alignment);
3640
3641  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
3642}
3643
3644SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
3645                                SDValue Chain,
3646                                SDValue Ptr, SDValue Val,
3647                                MachineMemOperand *MMO) {
3648  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
3649          Opcode == ISD::ATOMIC_LOAD_SUB ||
3650          Opcode == ISD::ATOMIC_LOAD_AND ||
3651          Opcode == ISD::ATOMIC_LOAD_OR ||
3652          Opcode == ISD::ATOMIC_LOAD_XOR ||
3653          Opcode == ISD::ATOMIC_LOAD_NAND ||
3654          Opcode == ISD::ATOMIC_LOAD_MIN ||
3655          Opcode == ISD::ATOMIC_LOAD_MAX ||
3656          Opcode == ISD::ATOMIC_LOAD_UMIN ||
3657          Opcode == ISD::ATOMIC_LOAD_UMAX ||
3658          Opcode == ISD::ATOMIC_SWAP) &&
3659         "Invalid Atomic Op");
3660
3661  EVT VT = Val.getValueType();
3662
3663  SDVTList VTs = getVTList(VT, MVT::Other);
3664  FoldingSetNodeID ID;
3665  ID.AddInteger(MemVT.getRawBits());
3666  SDValue Ops[] = {Chain, Ptr, Val};
3667  AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3668  void* IP = 0;
3669  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
3670    cast<AtomicSDNode>(E)->refineAlignment(MMO);
3671    return SDValue(E, 0);
3672  }
3673  SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
3674  new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Val, MMO);
3675  CSEMap.InsertNode(N, IP);
3676  AllNodes.push_back(N);
3677  return SDValue(N, 0);
3678}
3679
3680/// getMergeValues - Create a MERGE_VALUES node from the given operands.
3681/// Allowed to return something different (and simpler), e.g. a lone operand.
3682SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
3683                                     DebugLoc dl) {
3684  if (NumOps == 1)
3685    return Ops[0];
3686
3687  SmallVector<EVT, 4> VTs;
3688  VTs.reserve(NumOps);
3689  for (unsigned i = 0; i < NumOps; ++i)
3690    VTs.push_back(Ops[i].getValueType());
3691  return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
3692                 Ops, NumOps);
3693}
3694
3695SDValue
3696SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
3697                                  const EVT *VTs, unsigned NumVTs,
3698                                  const SDValue *Ops, unsigned NumOps,
3699                                  EVT MemVT, const Value *srcValue, int SVOff,
3700                                  unsigned Align, bool Vol,
3701                                  bool ReadMem, bool WriteMem) {
3702  return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
3703                             MemVT, srcValue, SVOff, Align, Vol,
3704                             ReadMem, WriteMem);
3705}
3706
3707SDValue
3708SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
3709                                  const SDValue *Ops, unsigned NumOps,
3710                                  EVT MemVT, const Value *srcValue, int SVOff,
3711                                  unsigned Align, bool Vol,
3712                                  bool ReadMem, bool WriteMem) {
3713  if (Align == 0)  // Ensure that codegen never sees alignment 0
3714    Align = getEVTAlignment(MemVT);
3715
3716  MachineFunction &MF = getMachineFunction();
3717  unsigned Flags = 0;
3718  if (WriteMem)
3719    Flags |= MachineMemOperand::MOStore;
3720  if (ReadMem)
3721    Flags |= MachineMemOperand::MOLoad;
3722  if (Vol)
3723    Flags |= MachineMemOperand::MOVolatile;
3724  MachineMemOperand *MMO =
3725    MF.getMachineMemOperand(srcValue, Flags, SVOff,
3726                            MemVT.getStoreSize(), Align);
3727
3728  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
3729}
3730
3731SDValue
3732SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
3733                                  const SDValue *Ops, unsigned NumOps,
3734                                  EVT MemVT, MachineMemOperand *MMO) {
3735  assert((Opcode == ISD::INTRINSIC_VOID ||
3736          Opcode == ISD::INTRINSIC_W_CHAIN ||
3737          (Opcode <= INT_MAX &&
3738           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
3739         "Opcode is not a memory-accessing opcode!");
3740
3741  // Memoize the node unless it returns a flag.
3742  MemIntrinsicSDNode *N;
3743  if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
3744    FoldingSetNodeID ID;
3745    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
3746    void *IP = 0;
3747    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
3748      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
3749      return SDValue(E, 0);
3750    }
3751
3752    N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
3753    new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
3754    CSEMap.InsertNode(N, IP);
3755  } else {
3756    N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
3757    new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
3758  }
3759  AllNodes.push_back(N);
3760  return SDValue(N, 0);
3761}
3762
3763SDValue
3764SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
3765                      ISD::LoadExtType ExtType, EVT VT, SDValue Chain,
3766                      SDValue Ptr, SDValue Offset,
3767                      const Value *SV, int SVOffset, EVT MemVT,
3768                      bool isVolatile, unsigned Alignment) {
3769  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3770    Alignment = getEVTAlignment(VT);
3771
3772  // Check if the memory reference references a frame index
3773  if (!SV)
3774    if (const FrameIndexSDNode *FI =
3775          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
3776      SV = PseudoSourceValue::getFixedStack(FI->getIndex());
3777
3778  MachineFunction &MF = getMachineFunction();
3779  unsigned Flags = MachineMemOperand::MOLoad;
3780  if (isVolatile)
3781    Flags |= MachineMemOperand::MOVolatile;
3782  MachineMemOperand *MMO =
3783    MF.getMachineMemOperand(SV, Flags, SVOffset,
3784                            MemVT.getStoreSize(), Alignment);
3785  return getLoad(AM, dl, ExtType, VT, Chain, Ptr, Offset, MemVT, MMO);
3786}
3787
3788SDValue
3789SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
3790                      ISD::LoadExtType ExtType, EVT VT, SDValue Chain,
3791                      SDValue Ptr, SDValue Offset, EVT MemVT,
3792                      MachineMemOperand *MMO) {
3793  if (VT == MemVT) {
3794    ExtType = ISD::NON_EXTLOAD;
3795  } else if (ExtType == ISD::NON_EXTLOAD) {
3796    assert(VT == MemVT && "Non-extending load from different memory type!");
3797  } else {
3798    // Extending load.
3799    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
3800           "Should only be an extending load, not truncating!");
3801    assert(VT.isInteger() == MemVT.isInteger() &&
3802           "Cannot convert from FP to Int or Int -> FP!");
3803    assert(VT.isVector() == MemVT.isVector() &&
3804           "Cannot use an ext load to convert to or from a vector!");
3805    assert((!VT.isVector() ||
3806            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
3807           "Cannot use an ext load to change the number of vector elements!");
3808  }
3809
3810  bool Indexed = AM != ISD::UNINDEXED;
3811  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
3812         "Unindexed load with an offset!");
3813
3814  SDVTList VTs = Indexed ?
3815    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
3816  SDValue Ops[] = { Chain, Ptr, Offset };
3817  FoldingSetNodeID ID;
3818  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
3819  ID.AddInteger(MemVT.getRawBits());
3820  ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile()));
3821  void *IP = 0;
3822  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
3823    cast<LoadSDNode>(E)->refineAlignment(MMO);
3824    return SDValue(E, 0);
3825  }
3826  SDNode *N = NodeAllocator.Allocate<LoadSDNode>();
3827  new (N) LoadSDNode(Ops, dl, VTs, AM, ExtType, MemVT, MMO);
3828  CSEMap.InsertNode(N, IP);
3829  AllNodes.push_back(N);
3830  return SDValue(N, 0);
3831}
3832
3833SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
3834                              SDValue Chain, SDValue Ptr,
3835                              const Value *SV, int SVOffset,
3836                              bool isVolatile, unsigned Alignment) {
3837  SDValue Undef = getUNDEF(Ptr.getValueType());
3838  return getLoad(ISD::UNINDEXED, dl, ISD::NON_EXTLOAD, VT, Chain, Ptr, Undef,
3839                 SV, SVOffset, VT, isVolatile, Alignment);
3840}
3841
3842SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
3843                                 SDValue Chain, SDValue Ptr,
3844                                 const Value *SV,
3845                                 int SVOffset, EVT MemVT,
3846                                 bool isVolatile, unsigned Alignment) {
3847  SDValue Undef = getUNDEF(Ptr.getValueType());
3848  return getLoad(ISD::UNINDEXED, dl, ExtType, VT, Chain, Ptr, Undef,
3849                 SV, SVOffset, MemVT, isVolatile, Alignment);
3850}
3851
3852SDValue
3853SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
3854                             SDValue Offset, ISD::MemIndexedMode AM) {
3855  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
3856  assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
3857         "Load is already an indexed load!");
3858  return getLoad(AM, dl, LD->getExtensionType(), OrigLoad.getValueType(),
3859                 LD->getChain(), Base, Offset, LD->getSrcValue(),
3860                 LD->getSrcValueOffset(), LD->getMemoryVT(),
3861                 LD->isVolatile(), LD->getAlignment());
3862}
3863
3864SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
3865                               SDValue Ptr, const Value *SV, int SVOffset,
3866                               bool isVolatile, unsigned Alignment) {
3867  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3868    Alignment = getEVTAlignment(Val.getValueType());
3869
3870  // Check if the memory reference references a frame index
3871  if (!SV)
3872    if (const FrameIndexSDNode *FI =
3873          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
3874      SV = PseudoSourceValue::getFixedStack(FI->getIndex());
3875
3876  MachineFunction &MF = getMachineFunction();
3877  unsigned Flags = MachineMemOperand::MOStore;
3878  if (isVolatile)
3879    Flags |= MachineMemOperand::MOVolatile;
3880  MachineMemOperand *MMO =
3881    MF.getMachineMemOperand(SV, Flags, SVOffset,
3882                            Val.getValueType().getStoreSize(), Alignment);
3883
3884  return getStore(Chain, dl, Val, Ptr, MMO);
3885}
3886
3887SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
3888                               SDValue Ptr, MachineMemOperand *MMO) {
3889  EVT VT = Val.getValueType();
3890  SDVTList VTs = getVTList(MVT::Other);
3891  SDValue Undef = getUNDEF(Ptr.getValueType());
3892  SDValue Ops[] = { Chain, Val, Ptr, Undef };
3893  FoldingSetNodeID ID;
3894  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
3895  ID.AddInteger(VT.getRawBits());
3896  ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile()));
3897  void *IP = 0;
3898  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
3899    cast<StoreSDNode>(E)->refineAlignment(MMO);
3900    return SDValue(E, 0);
3901  }
3902  SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
3903  new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, false, VT, MMO);
3904  CSEMap.InsertNode(N, IP);
3905  AllNodes.push_back(N);
3906  return SDValue(N, 0);
3907}
3908
3909SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
3910                                    SDValue Ptr, const Value *SV,
3911                                    int SVOffset, EVT SVT,
3912                                    bool isVolatile, unsigned Alignment) {
3913  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
3914    Alignment = getEVTAlignment(SVT);
3915
3916  // Check if the memory reference references a frame index
3917  if (!SV)
3918    if (const FrameIndexSDNode *FI =
3919          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
3920      SV = PseudoSourceValue::getFixedStack(FI->getIndex());
3921
3922  MachineFunction &MF = getMachineFunction();
3923  unsigned Flags = MachineMemOperand::MOStore;
3924  if (isVolatile)
3925    Flags |= MachineMemOperand::MOVolatile;
3926  MachineMemOperand *MMO =
3927    MF.getMachineMemOperand(SV, Flags, SVOffset, SVT.getStoreSize(), Alignment);
3928
3929  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
3930}
3931
3932SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
3933                                    SDValue Ptr, EVT SVT,
3934                                    MachineMemOperand *MMO) {
3935  EVT VT = Val.getValueType();
3936
3937  if (VT == SVT)
3938    return getStore(Chain, dl, Val, Ptr, MMO);
3939
3940  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
3941         "Should only be a truncating store, not extending!");
3942  assert(VT.isInteger() == SVT.isInteger() &&
3943         "Can't do FP-INT conversion!");
3944  assert(VT.isVector() == SVT.isVector() &&
3945         "Cannot use trunc store to convert to or from a vector!");
3946  assert((!VT.isVector() ||
3947          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
3948         "Cannot use trunc store to change the number of vector elements!");
3949
3950  SDVTList VTs = getVTList(MVT::Other);
3951  SDValue Undef = getUNDEF(Ptr.getValueType());
3952  SDValue Ops[] = { Chain, Val, Ptr, Undef };
3953  FoldingSetNodeID ID;
3954  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
3955  ID.AddInteger(SVT.getRawBits());
3956  ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile()));
3957  void *IP = 0;
3958  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
3959    cast<StoreSDNode>(E)->refineAlignment(MMO);
3960    return SDValue(E, 0);
3961  }
3962  SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
3963  new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, true, SVT, MMO);
3964  CSEMap.InsertNode(N, IP);
3965  AllNodes.push_back(N);
3966  return SDValue(N, 0);
3967}
3968
3969SDValue
3970SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
3971                              SDValue Offset, ISD::MemIndexedMode AM) {
3972  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
3973  assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
3974         "Store is already an indexed store!");
3975  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
3976  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
3977  FoldingSetNodeID ID;
3978  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
3979  ID.AddInteger(ST->getMemoryVT().getRawBits());
3980  ID.AddInteger(ST->getRawSubclassData());
3981  void *IP = 0;
3982  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3983    return SDValue(E, 0);
3984
3985  SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
3986  new (N) StoreSDNode(Ops, dl, VTs, AM,
3987                      ST->isTruncatingStore(), ST->getMemoryVT(),
3988                      ST->getMemOperand());
3989  CSEMap.InsertNode(N, IP);
3990  AllNodes.push_back(N);
3991  return SDValue(N, 0);
3992}
3993
3994SDValue SelectionDAG::getVAArg(EVT VT, DebugLoc dl,
3995                               SDValue Chain, SDValue Ptr,
3996                               SDValue SV) {
3997  SDValue Ops[] = { Chain, Ptr, SV };
3998  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 3);
3999}
4000
4001SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
4002                              const SDUse *Ops, unsigned NumOps) {
4003  switch (NumOps) {
4004  case 0: return getNode(Opcode, DL, VT);
4005  case 1: return getNode(Opcode, DL, VT, Ops[0]);
4006  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4007  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4008  default: break;
4009  }
4010
4011  // Copy from an SDUse array into an SDValue array for use with
4012  // the regular getNode logic.
4013  SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
4014  return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
4015}
4016
4017SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
4018                              const SDValue *Ops, unsigned NumOps) {
4019  switch (NumOps) {
4020  case 0: return getNode(Opcode, DL, VT);
4021  case 1: return getNode(Opcode, DL, VT, Ops[0]);
4022  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4023  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4024  default: break;
4025  }
4026
4027  switch (Opcode) {
4028  default: break;
4029  case ISD::SELECT_CC: {
4030    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4031    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4032           "LHS and RHS of condition must have same type!");
4033    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4034           "True and False arms of SelectCC must have same type!");
4035    assert(Ops[2].getValueType() == VT &&
4036           "select_cc node must be of same type as true and false value!");
4037    break;
4038  }
4039  case ISD::BR_CC: {
4040    assert(NumOps == 5 && "BR_CC takes 5 operands!");
4041    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4042           "LHS/RHS of comparison should match types!");
4043    break;
4044  }
4045  }
4046
4047  // Memoize nodes.
4048  SDNode *N;
4049  SDVTList VTs = getVTList(VT);
4050
4051  if (VT != MVT::Flag) {
4052    FoldingSetNodeID ID;
4053    AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
4054    void *IP = 0;
4055
4056    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4057      return SDValue(E, 0);
4058
4059    N = NodeAllocator.Allocate<SDNode>();
4060    new (N) SDNode(Opcode, DL, VTs, Ops, NumOps);
4061    CSEMap.InsertNode(N, IP);
4062  } else {
4063    N = NodeAllocator.Allocate<SDNode>();
4064    new (N) SDNode(Opcode, DL, VTs, Ops, NumOps);
4065  }
4066
4067  AllNodes.push_back(N);
4068#ifndef NDEBUG
4069  VerifyNode(N);
4070#endif
4071  return SDValue(N, 0);
4072}
4073
4074SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
4075                              const std::vector<EVT> &ResultTys,
4076                              const SDValue *Ops, unsigned NumOps) {
4077  return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
4078                 Ops, NumOps);
4079}
4080
4081SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
4082                              const EVT *VTs, unsigned NumVTs,
4083                              const SDValue *Ops, unsigned NumOps) {
4084  if (NumVTs == 1)
4085    return getNode(Opcode, DL, VTs[0], Ops, NumOps);
4086  return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
4087}
4088
4089SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4090                              const SDValue *Ops, unsigned NumOps) {
4091  if (VTList.NumVTs == 1)
4092    return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
4093
4094#if 0
4095  switch (Opcode) {
4096  // FIXME: figure out how to safely handle things like
4097  // int foo(int x) { return 1 << (x & 255); }
4098  // int bar() { return foo(256); }
4099  case ISD::SRA_PARTS:
4100  case ISD::SRL_PARTS:
4101  case ISD::SHL_PARTS:
4102    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4103        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4104      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4105    else if (N3.getOpcode() == ISD::AND)
4106      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4107        // If the and is only masking out bits that cannot effect the shift,
4108        // If the and is only masking out bits that cannot affect the shift,
4109        unsigned NumBits = VT.getSizeInBits()*2;
4110        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4111          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4112      }
4113    break;
4114  }
4115#endif
4116
4117  // Memoize the node unless it returns a flag.
4118  SDNode *N;
4119  if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
4120    FoldingSetNodeID ID;
4121    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4122    void *IP = 0;
4123    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4124      return SDValue(E, 0);
4125
4126    if (NumOps == 1) {
4127      N = NodeAllocator.Allocate<UnarySDNode>();
4128      new (N) UnarySDNode(Opcode, DL, VTList, Ops[0]);
4129    } else if (NumOps == 2) {
4130      N = NodeAllocator.Allocate<BinarySDNode>();
4131      new (N) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
4132    } else if (NumOps == 3) {
4133      N = NodeAllocator.Allocate<TernarySDNode>();
4134      new (N) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1], Ops[2]);
4135    } else {
4136      N = NodeAllocator.Allocate<SDNode>();
4137      new (N) SDNode(Opcode, DL, VTList, Ops, NumOps);
4138    }
4139    CSEMap.InsertNode(N, IP);
4140  } else {
4141    if (NumOps == 1) {
4142      N = NodeAllocator.Allocate<UnarySDNode>();
4143      new (N) UnarySDNode(Opcode, DL, VTList, Ops[0]);
4144    } else if (NumOps == 2) {
4145      N = NodeAllocator.Allocate<BinarySDNode>();
4146      new (N) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
4147    } else if (NumOps == 3) {
4148      N = NodeAllocator.Allocate<TernarySDNode>();
4149      new (N) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1], Ops[2]);
4150    } else {
4151      N = NodeAllocator.Allocate<SDNode>();
4152      new (N) SDNode(Opcode, DL, VTList, Ops, NumOps);
4153    }
4154  }
4155  AllNodes.push_back(N);
4156#ifndef NDEBUG
4157  VerifyNode(N);
4158#endif
4159  return SDValue(N, 0);
4160}
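
// A minimal usage sketch for the SDVTList path above (illustrative only;
// 'DAG', 'dl', 'LHS' and 'RHS' are assumed to be in scope):
//
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32);
//   SDValue Ops[] = { LHS, RHS };
//   SDValue Lo = DAG.getNode(ISD::SMUL_LOHI, dl, VTs, Ops, 2);
//   SDValue Hi = Lo.getValue(1);   // Second result of the same node.
//
// Because the node is memoized in CSEMap, requesting the identical opcode,
// VT list, and operands again returns the same SDNode rather than a copy.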
4161
4162SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList) {
4163  return getNode(Opcode, DL, VTList, 0, 0);
4164}
4165
4166SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4167                              SDValue N1) {
4168  SDValue Ops[] = { N1 };
4169  return getNode(Opcode, DL, VTList, Ops, 1);
4170}
4171
4172SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4173                              SDValue N1, SDValue N2) {
4174  SDValue Ops[] = { N1, N2 };
4175  return getNode(Opcode, DL, VTList, Ops, 2);
4176}
4177
4178SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4179                              SDValue N1, SDValue N2, SDValue N3) {
4180  SDValue Ops[] = { N1, N2, N3 };
4181  return getNode(Opcode, DL, VTList, Ops, 3);
4182}
4183
4184SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4185                              SDValue N1, SDValue N2, SDValue N3,
4186                              SDValue N4) {
4187  SDValue Ops[] = { N1, N2, N3, N4 };
4188  return getNode(Opcode, DL, VTList, Ops, 4);
4189}
4190
4191SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4192                              SDValue N1, SDValue N2, SDValue N3,
4193                              SDValue N4, SDValue N5) {
4194  SDValue Ops[] = { N1, N2, N3, N4, N5 };
4195  return getNode(Opcode, DL, VTList, Ops, 5);
4196}
4197
4198SDVTList SelectionDAG::getVTList(EVT VT) {
4199  return makeVTList(SDNode::getValueTypeList(VT), 1);
4200}
4201
4202SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
4203  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4204       E = VTList.rend(); I != E; ++I)
4205    if (I->NumVTs == 2 && I->VTs[0] == VT1 && I->VTs[1] == VT2)
4206      return *I;
4207
4208  EVT *Array = Allocator.Allocate<EVT>(2);
4209  Array[0] = VT1;
4210  Array[1] = VT2;
4211  SDVTList Result = makeVTList(Array, 2);
4212  VTList.push_back(Result);
4213  return Result;
4214}
4215
4216SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
4217  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4218       E = VTList.rend(); I != E; ++I)
4219    if (I->NumVTs == 3 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4220                          I->VTs[2] == VT3)
4221      return *I;
4222
4223  EVT *Array = Allocator.Allocate<EVT>(3);
4224  Array[0] = VT1;
4225  Array[1] = VT2;
4226  Array[2] = VT3;
4227  SDVTList Result = makeVTList(Array, 3);
4228  VTList.push_back(Result);
4229  return Result;
4230}
4231
4232SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
4233  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4234       E = VTList.rend(); I != E; ++I)
4235    if (I->NumVTs == 4 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4236                          I->VTs[2] == VT3 && I->VTs[3] == VT4)
4237      return *I;
4238
4239  EVT *Array = Allocator.Allocate<EVT>(4);
4240  Array[0] = VT1;
4241  Array[1] = VT2;
4242  Array[2] = VT3;
4243  Array[3] = VT4;
4244  SDVTList Result = makeVTList(Array, 4);
4245  VTList.push_back(Result);
4246  return Result;
4247}
4248
4249SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
4250  switch (NumVTs) {
4251    case 0: llvm_unreachable("Cannot have nodes without results!");
4252    case 1: return getVTList(VTs[0]);
4253    case 2: return getVTList(VTs[0], VTs[1]);
4254    case 3: return getVTList(VTs[0], VTs[1], VTs[2]);
4255    case 4: return getVTList(VTs[0], VTs[1], VTs[2], VTs[3]);
4256    default: break;
4257  }
4258
4259  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4260       E = VTList.rend(); I != E; ++I) {
4261    if (I->NumVTs != NumVTs || VTs[0] != I->VTs[0] || VTs[1] != I->VTs[1])
4262      continue;
4263
4264    bool NoMatch = false;
4265    for (unsigned i = 2; i != NumVTs; ++i)
4266      if (VTs[i] != I->VTs[i]) {
4267        NoMatch = true;
4268        break;
4269      }
4270    if (!NoMatch)
4271      return *I;
4272  }
4273
4274  EVT *Array = Allocator.Allocate<EVT>(NumVTs);
4275  std::copy(VTs, VTs+NumVTs, Array);
4276  SDVTList Result = makeVTList(Array, NumVTs);
4277  VTList.push_back(Result);
4278  return Result;
4279}
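
// A small sketch of the uniquing behavior above (illustrative only):
//
//   SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
//   SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
//   assert(A.VTs == B.VTs && "Equal VT sequences share one uniqued array");
//
// This is what keeps SDVTList cheap to copy: it is just a pointer/length
// pair into storage owned by the SelectionDAG's allocator.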
4280
4281
4282/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
4283/// specified operands.  If the resultant node already exists in the DAG,
4284/// this does not modify the specified node, instead it returns the node that
4285/// already exists.  If the resultant node does not exist in the DAG, the
4286/// input node is returned.  As a degenerate case, if you specify the same
4287/// input operands as the node already has, the input node is returned.
4288SDValue SelectionDAG::UpdateNodeOperands(SDValue InN, SDValue Op) {
4289  SDNode *N = InN.getNode();
4290  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
4291
4292  // Check to see if there is no change.
4293  if (Op == N->getOperand(0)) return InN;
4294
4295  // See if the modified node already exists.
4296  void *InsertPos = 0;
4297  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
4298    return SDValue(Existing, InN.getResNo());
4299
4300  // Nope it doesn't.  Remove the node from its current place in the maps.
4301  if (InsertPos)
4302    if (!RemoveNodeFromCSEMaps(N))
4303      InsertPos = 0;
4304
4305  // Now we update the operands.
4306  N->OperandList[0].set(Op);
4307
4308  // If this gets put into a CSE map, add it.
4309  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4310  return InN;
4311}
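
// A minimal sketch of the contract described above (illustrative only;
// 'N' is assumed to be an SDValue whose node has exactly one operand, and
// 'NewOp' the replacement operand):
//
//   SDValue Res = DAG.UpdateNodeOperands(N, NewOp);
//   if (Res.getNode() != N.getNode()) {
//     // An equivalent node already existed; N itself was left untouched
//     // and callers should use Res instead.
//   }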
4312
4313SDValue SelectionDAG::
4314UpdateNodeOperands(SDValue InN, SDValue Op1, SDValue Op2) {
4315  SDNode *N = InN.getNode();
4316  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
4317
4318  // Check to see if there is no change.
4319  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
4320    return InN;   // No operands changed, just return the input node.
4321
4322  // See if the modified node already exists.
4323  void *InsertPos = 0;
4324  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
4325    return SDValue(Existing, InN.getResNo());
4326
4327  // Nope it doesn't.  Remove the node from its current place in the maps.
4328  if (InsertPos)
4329    if (!RemoveNodeFromCSEMaps(N))
4330      InsertPos = 0;
4331
4332  // Now we update the operands.
4333  if (N->OperandList[0] != Op1)
4334    N->OperandList[0].set(Op1);
4335  if (N->OperandList[1] != Op2)
4336    N->OperandList[1].set(Op2);
4337
4338  // If this gets put into a CSE map, add it.
4339  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4340  return InN;
4341}
4342
4343SDValue SelectionDAG::
4344UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, SDValue Op3) {
4345  SDValue Ops[] = { Op1, Op2, Op3 };
4346  return UpdateNodeOperands(N, Ops, 3);
4347}
4348
4349SDValue SelectionDAG::
4350UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
4351                   SDValue Op3, SDValue Op4) {
4352  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
4353  return UpdateNodeOperands(N, Ops, 4);
4354}
4355
4356SDValue SelectionDAG::
4357UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
4358                   SDValue Op3, SDValue Op4, SDValue Op5) {
4359  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
4360  return UpdateNodeOperands(N, Ops, 5);
4361}
4362
4363SDValue SelectionDAG::
4364UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
4365  SDNode *N = InN.getNode();
4366  assert(N->getNumOperands() == NumOps &&
4367         "Update with wrong number of operands");
4368
4369  // Check to see if there is no change.
4370  bool AnyChange = false;
4371  for (unsigned i = 0; i != NumOps; ++i) {
4372    if (Ops[i] != N->getOperand(i)) {
4373      AnyChange = true;
4374      break;
4375    }
4376  }
4377
4378  // No operands changed, just return the input node.
4379  if (!AnyChange) return InN;
4380
4381  // See if the modified node already exists.
4382  void *InsertPos = 0;
4383  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
4384    return SDValue(Existing, InN.getResNo());
4385
4386  // Nope it doesn't.  Remove the node from its current place in the maps.
4387  if (InsertPos)
4388    if (!RemoveNodeFromCSEMaps(N))
4389      InsertPos = 0;
4390
4391  // Now we update the operands.
4392  for (unsigned i = 0; i != NumOps; ++i)
4393    if (N->OperandList[i] != Ops[i])
4394      N->OperandList[i].set(Ops[i]);
4395
4396  // If this gets put into a CSE map, add it.
4397  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4398  return InN;
4399}
4400
4401/// DropOperands - Release the operands and set this node to have
4402/// zero operands.
4403void SDNode::DropOperands() {
4404  // Unlike the code in MorphNodeTo that does this, we don't need to
4405  // watch for dead nodes here.
4406  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
4407    SDUse &Use = *I++;
4408    Use.set(SDValue());
4409  }
4410}
4411
4412/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
4413/// machine opcode.
4414///
4415SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4416                                   EVT VT) {
4417  SDVTList VTs = getVTList(VT);
4418  return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
4419}
4420
4421SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4422                                   EVT VT, SDValue Op1) {
4423  SDVTList VTs = getVTList(VT);
4424  SDValue Ops[] = { Op1 };
4425  return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
4426}
4427
4428SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4429                                   EVT VT, SDValue Op1,
4430                                   SDValue Op2) {
4431  SDVTList VTs = getVTList(VT);
4432  SDValue Ops[] = { Op1, Op2 };
4433  return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
4434}
4435
4436SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4437                                   EVT VT, SDValue Op1,
4438                                   SDValue Op2, SDValue Op3) {
4439  SDVTList VTs = getVTList(VT);
4440  SDValue Ops[] = { Op1, Op2, Op3 };
4441  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
4442}
4443
4444SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4445                                   EVT VT, const SDValue *Ops,
4446                                   unsigned NumOps) {
4447  SDVTList VTs = getVTList(VT);
4448  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
4449}
4450
4451SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4452                                   EVT VT1, EVT VT2, const SDValue *Ops,
4453                                   unsigned NumOps) {
4454  SDVTList VTs = getVTList(VT1, VT2);
4455  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
4456}
4457
4458SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4459                                   EVT VT1, EVT VT2) {
4460  SDVTList VTs = getVTList(VT1, VT2);
4461  return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
4462}
4463
4464SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4465                                   EVT VT1, EVT VT2, EVT VT3,
4466                                   const SDValue *Ops, unsigned NumOps) {
4467  SDVTList VTs = getVTList(VT1, VT2, VT3);
4468  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
4469}
4470
4471SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4472                                   EVT VT1, EVT VT2, EVT VT3, EVT VT4,
4473                                   const SDValue *Ops, unsigned NumOps) {
4474  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
4475  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
4476}
4477
4478SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4479                                   EVT VT1, EVT VT2,
4480                                   SDValue Op1) {
4481  SDVTList VTs = getVTList(VT1, VT2);
4482  SDValue Ops[] = { Op1 };
4483  return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
4484}
4485
4486SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4487                                   EVT VT1, EVT VT2,
4488                                   SDValue Op1, SDValue Op2) {
4489  SDVTList VTs = getVTList(VT1, VT2);
4490  SDValue Ops[] = { Op1, Op2 };
4491  return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
4492}
4493
4494SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4495                                   EVT VT1, EVT VT2,
4496                                   SDValue Op1, SDValue Op2,
4497                                   SDValue Op3) {
4498  SDVTList VTs = getVTList(VT1, VT2);
4499  SDValue Ops[] = { Op1, Op2, Op3 };
4500  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
4501}
4502
4503SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4504                                   EVT VT1, EVT VT2, EVT VT3,
4505                                   SDValue Op1, SDValue Op2,
4506                                   SDValue Op3) {
4507  SDVTList VTs = getVTList(VT1, VT2, VT3);
4508  SDValue Ops[] = { Op1, Op2, Op3 };
4509  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
4510}
4511
4512SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
4513                                   SDVTList VTs, const SDValue *Ops,
4514                                   unsigned NumOps) {
4515  return MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
4516}
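
// Note that the machine opcode is stored complemented (~MachineOpc), which
// is how SDNode distinguishes target machine instructions from ISD opcodes.
// A hedged selection-time sketch (illustrative only; 'TargetOpc' stands for
// some target-specific machine instruction opcode):
//
//   SDNode *New = DAG.SelectNodeTo(N, TargetOpc, MVT::i32,
//                                  N->getOperand(0), N->getOperand(1));
//   // 'New' may be a pre-existing equivalent node rather than N itself.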
4517
4518SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4519                                  EVT VT) {
4520  SDVTList VTs = getVTList(VT);
4521  return MorphNodeTo(N, Opc, VTs, 0, 0);
4522}
4523
4524SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4525                                  EVT VT, SDValue Op1) {
4526  SDVTList VTs = getVTList(VT);
4527  SDValue Ops[] = { Op1 };
4528  return MorphNodeTo(N, Opc, VTs, Ops, 1);
4529}
4530
4531SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4532                                  EVT VT, SDValue Op1,
4533                                  SDValue Op2) {
4534  SDVTList VTs = getVTList(VT);
4535  SDValue Ops[] = { Op1, Op2 };
4536  return MorphNodeTo(N, Opc, VTs, Ops, 2);
4537}
4538
4539SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4540                                  EVT VT, SDValue Op1,
4541                                  SDValue Op2, SDValue Op3) {
4542  SDVTList VTs = getVTList(VT);
4543  SDValue Ops[] = { Op1, Op2, Op3 };
4544  return MorphNodeTo(N, Opc, VTs, Ops, 3);
4545}
4546
4547SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4548                                  EVT VT, const SDValue *Ops,
4549                                  unsigned NumOps) {
4550  SDVTList VTs = getVTList(VT);
4551  return MorphNodeTo(N, Opc, VTs, Ops, NumOps);
4552}
4553
4554SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4555                                  EVT VT1, EVT VT2, const SDValue *Ops,
4556                                  unsigned NumOps) {
4557  SDVTList VTs = getVTList(VT1, VT2);
4558  return MorphNodeTo(N, Opc, VTs, Ops, NumOps);
4559}
4560
4561SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4562                                  EVT VT1, EVT VT2) {
4563  SDVTList VTs = getVTList(VT1, VT2);
4564  return MorphNodeTo(N, Opc, VTs, (SDValue *)0, 0);
4565}
4566
4567SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4568                                  EVT VT1, EVT VT2, EVT VT3,
4569                                  const SDValue *Ops, unsigned NumOps) {
4570  SDVTList VTs = getVTList(VT1, VT2, VT3);
4571  return MorphNodeTo(N, Opc, VTs, Ops, NumOps);
4572}
4573
4574SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4575                                  EVT VT1, EVT VT2,
4576                                  SDValue Op1) {
4577  SDVTList VTs = getVTList(VT1, VT2);
4578  SDValue Ops[] = { Op1 };
4579  return MorphNodeTo(N, Opc, VTs, Ops, 1);
4580}
4581
4582SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4583                                  EVT VT1, EVT VT2,
4584                                  SDValue Op1, SDValue Op2) {
4585  SDVTList VTs = getVTList(VT1, VT2);
4586  SDValue Ops[] = { Op1, Op2 };
4587  return MorphNodeTo(N, Opc, VTs, Ops, 2);
4588}
4589
4590SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4591                                  EVT VT1, EVT VT2,
4592                                  SDValue Op1, SDValue Op2,
4593                                  SDValue Op3) {
4594  SDVTList VTs = getVTList(VT1, VT2);
4595  SDValue Ops[] = { Op1, Op2, Op3 };
4596  return MorphNodeTo(N, Opc, VTs, Ops, 3);
4597}
4598
4599/// MorphNodeTo - These *mutate* the specified node to have the specified
4600/// return type, opcode, and operands.
4601///
4602/// Note that MorphNodeTo returns the resultant node.  If there is already a
4603/// node of the specified opcode and operands, it returns that node instead of
4604/// the current one.  Note that the DebugLoc need not be the same.
4605///
4606/// Using MorphNodeTo is faster than creating a new node and swapping it in
4607/// with ReplaceAllUsesWith both because it often avoids allocating a new
4608/// node, and because it doesn't require CSE recalculation for any of
4609/// the node's users.
4610///
4611SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
4612                                  SDVTList VTs, const SDValue *Ops,
4613                                  unsigned NumOps) {
4614  // If an identical node already exists, use it.
4615  void *IP = 0;
4616  if (VTs.VTs[VTs.NumVTs-1] != MVT::Flag) {
4617    FoldingSetNodeID ID;
4618    AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
4619    if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
4620      return ON;
4621  }
4622
4623  if (!RemoveNodeFromCSEMaps(N))
4624    IP = 0;
4625
4626  // Start the morphing.
4627  N->NodeType = Opc;
4628  N->ValueList = VTs.VTs;
4629  N->NumValues = VTs.NumVTs;
4630
4631  // Clear the operands list, updating used nodes to remove this from their
4632  // use list.  Keep track of any operands that become dead as a result.
4633  SmallPtrSet<SDNode*, 16> DeadNodeSet;
4634  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
4635    SDUse &Use = *I++;
4636    SDNode *Used = Use.getNode();
4637    Use.set(SDValue());
4638    if (Used->use_empty())
4639      DeadNodeSet.insert(Used);
4640  }
4641
4642  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
4643    // Initialize the memory references information.
4644    MN->setMemRefs(0, 0);
4645    // If NumOps is larger than the # of operands we can have in a
4646    // MachineSDNode, reallocate the operand list.
4647    if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
4648      if (MN->OperandsNeedDelete)
4649        delete[] MN->OperandList;
4650      if (NumOps > array_lengthof(MN->LocalOperands))
4651        // We're creating a final node that will live unmorphed for the
4652        // remainder of the current SelectionDAG iteration, so we can allocate
4653        // the operands directly out of a pool with no recycling metadata.
4654        MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
4655                        Ops, NumOps);
4656      else
4657        MN->InitOperands(MN->LocalOperands, Ops, NumOps);
4658      MN->OperandsNeedDelete = false;
4659    } else
4660      MN->InitOperands(MN->OperandList, Ops, NumOps);
4661  } else {
4662    // If NumOps is larger than the # of operands we currently have, reallocate
4663    // the operand list.
4664    if (NumOps > N->NumOperands) {
4665      if (N->OperandsNeedDelete)
4666        delete[] N->OperandList;
4667      N->InitOperands(new SDUse[NumOps], Ops, NumOps);
4668      N->OperandsNeedDelete = true;
4669    } else
4670      N->InitOperands(N->OperandList, Ops, NumOps);
4671  }
4672
4673  // Delete any nodes that are still dead after adding the uses for the
4674  // new operands.
4675  SmallVector<SDNode *, 16> DeadNodes;
4676  for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
4677       E = DeadNodeSet.end(); I != E; ++I)
4678    if ((*I)->use_empty())
4679      DeadNodes.push_back(*I);
4680  RemoveDeadNodes(DeadNodes);
4681
4682  if (IP)
4683    CSEMap.InsertNode(N, IP);   // Memoize the new node.
4684  return N;
4685}
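
// A minimal caller-side sketch of the CSE caveat above (illustrative only;
// 'N', 'NewOp0' and 'NewOp1' are assumed to be in scope):
//
//   SDNode *Res = DAG.MorphNodeTo(N, ISD::ADD, N->getValueType(0),
//                                 NewOp0, NewOp1);
//   if (Res != N) {
//     // An identical node already existed; N was not modified and its
//     // remaining uses should be redirected to Res by the caller.
//   }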
4686
4687
4688/// getMachineNode - These are used for target selectors to create a new node
4689/// with specified return type(s), MachineInstr opcode, and operands.
4690///
4691/// Note that getMachineNode returns the resultant node.  If there is already a
4692/// node of the specified opcode and operands, it returns that node instead of
4693/// the current one.
4694MachineSDNode *
4695SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT) {
4696  SDVTList VTs = getVTList(VT);
4697  return getMachineNode(Opcode, dl, VTs, 0, 0);
4698}
4699
4700MachineSDNode *
4701SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT, SDValue Op1) {
4702  SDVTList VTs = getVTList(VT);
4703  SDValue Ops[] = { Op1 };
4704  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
4705}
4706
4707MachineSDNode *
4708SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
4709                             SDValue Op1, SDValue Op2) {
4710  SDVTList VTs = getVTList(VT);
4711  SDValue Ops[] = { Op1, Op2 };
4712  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
4713}
4714
4715MachineSDNode *
4716SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
4717                             SDValue Op1, SDValue Op2, SDValue Op3) {
4718  SDVTList VTs = getVTList(VT);
4719  SDValue Ops[] = { Op1, Op2, Op3 };
4720  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
4721}
4722
4723MachineSDNode *
4724SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
4725                             const SDValue *Ops, unsigned NumOps) {
4726  SDVTList VTs = getVTList(VT);
4727  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
4728}
4729
4730MachineSDNode *
4731SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2) {
4732  SDVTList VTs = getVTList(VT1, VT2);
4733  return getMachineNode(Opcode, dl, VTs, 0, 0);
4734}
4735
4736MachineSDNode *
4737SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
4738                             EVT VT1, EVT VT2, SDValue Op1) {
4739  SDVTList VTs = getVTList(VT1, VT2);
4740  SDValue Ops[] = { Op1 };
4741  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
4742}
4743
4744MachineSDNode *
4745SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
4746                             EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
4747  SDVTList VTs = getVTList(VT1, VT2);
4748  SDValue Ops[] = { Op1, Op2 };
4749  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
4750}
4751
4752MachineSDNode *
4753SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
4754                             EVT VT1, EVT VT2, SDValue Op1,
4755                             SDValue Op2, SDValue Op3) {
4756  SDVTList VTs = getVTList(VT1, VT2);
4757  SDValue Ops[] = { Op1, Op2, Op3 };
4758  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
4759}
4760
4761MachineSDNode *
4762SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
4763                             EVT VT1, EVT VT2,
4764                             const SDValue *Ops, unsigned NumOps) {
4765  SDVTList VTs = getVTList(VT1, VT2);
4766  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
4767}
4768
4769MachineSDNode *
4770SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
4771                             EVT VT1, EVT VT2, EVT VT3,
4772                             SDValue Op1, SDValue Op2) {
4773  SDVTList VTs = getVTList(VT1, VT2, VT3);
4774  SDValue Ops[] = { Op1, Op2 };
4775  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
4776}
4777
4778MachineSDNode *
4779SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
4780                             EVT VT1, EVT VT2, EVT VT3,
4781                             SDValue Op1, SDValue Op2, SDValue Op3) {
4782  SDVTList VTs = getVTList(VT1, VT2, VT3);
4783  SDValue Ops[] = { Op1, Op2, Op3 };
4784  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
4785}
4786
4787MachineSDNode *
4788SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
4789                             EVT VT1, EVT VT2, EVT VT3,
4790                             const SDValue *Ops, unsigned NumOps) {
4791  SDVTList VTs = getVTList(VT1, VT2, VT3);
4792  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
4793}
4794
4795MachineSDNode *
4796SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
4797                             EVT VT2, EVT VT3, EVT VT4,
4798                             const SDValue *Ops, unsigned NumOps) {
4799  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
4800  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
4801}
4802
4803MachineSDNode *
4804SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
4805                             const std::vector<EVT> &ResultTys,
4806                             const SDValue *Ops, unsigned NumOps) {
4807  SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
4808  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
4809}
4810
4811MachineSDNode *
4812SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
4813                             const SDValue *Ops, unsigned NumOps) {
4814  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Flag;
4815  MachineSDNode *N;
4816  void *IP;
4817
4818  if (DoCSE) {
4819    FoldingSetNodeID ID;
4820    AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
4821    IP = 0;
4822    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4823      return cast<MachineSDNode>(E);
4824  }
4825
4826  // Allocate a new MachineSDNode.
4827  N = NodeAllocator.Allocate<MachineSDNode>();
4828  new (N) MachineSDNode(~Opcode, DL, VTs);
4829
4830  // Initialize the operands list.
4831  if (NumOps > array_lengthof(N->LocalOperands))
4832    // We're creating a final node that will live unmorphed for the
4833    // remainder of the current SelectionDAG iteration, so we can allocate
4834    // the operands directly out of a pool with no recycling metadata.
4835    N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
4836                    Ops, NumOps);
4837  else
4838    N->InitOperands(N->LocalOperands, Ops, NumOps);
4839  N->OperandsNeedDelete = false;
4840
4841  if (DoCSE)
4842    CSEMap.InsertNode(N, IP);
4843
4844  AllNodes.push_back(N);
4845#ifndef NDEBUG
4846  VerifyNode(N);
4847#endif
4848  return N;
4849}
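
// A hedged usage sketch (illustrative only; 'TargetOpc' is a stand-in for a
// real target instruction opcode, and 'Addr'/'Chain' are assumed SDValues):
//
//   MachineSDNode *MN =
//     DAG.getMachineNode(TargetOpc, dl, MVT::i32, MVT::Other, Addr, Chain);
//   SDValue Value(MN, 0);      // First result: the produced value.
//   SDValue OutChain(MN, 1);   // Second result: the chain.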
4850
4851/// getTargetExtractSubreg - A convenience function for creating
4852/// TargetInstrInfo::EXTRACT_SUBREG nodes.
4853SDValue
4854SelectionDAG::getTargetExtractSubreg(int SRIdx, DebugLoc DL, EVT VT,
4855                                     SDValue Operand) {
4856  SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
4857  SDNode *Subreg = getMachineNode(TargetInstrInfo::EXTRACT_SUBREG, DL,
4858                                  VT, Operand, SRIdxVal);
4859  return SDValue(Subreg, 0);
4860}
4861
4862/// getTargetInsertSubreg - A convenience function for creating
4863/// TargetInstrInfo::INSERT_SUBREG nodes.
4864SDValue
4865SelectionDAG::getTargetInsertSubreg(int SRIdx, DebugLoc DL, EVT VT,
4866                                    SDValue Operand, SDValue Subreg) {
4867  SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
4868  SDNode *Result = getMachineNode(TargetInstrInfo::INSERT_SUBREG, DL,
4869                                  VT, Operand, Subreg, SRIdxVal);
4870  return SDValue(Result, 0);
4871}
4872
4873/// getNodeIfExists - Get the specified node if it's already available, or
4874/// else return NULL.
4875SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
4876                                      const SDValue *Ops, unsigned NumOps) {
4877  if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
4878    FoldingSetNodeID ID;
4879    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4880    void *IP = 0;
4881    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4882      return E;
4883  }
4884  return NULL;
4885}
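
// A minimal sketch (illustrative only): probing for an already-existing node
// without creating one, e.g. before deciding whether a rewrite is
// profitable.  'A' and 'B' are assumed SDValues:
//
//   SDValue Ops[] = { A, B };
//   if (SDNode *Existing =
//         DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(MVT::i32), Ops, 2))
//     (void)Existing;   // Reuse it instead of building a duplicate.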
4886
4887/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
4888/// This can cause recursive merging of nodes in the DAG.
4889///
4890/// This version assumes From has a single result value.
4891///
4892void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To,
4893                                      DAGUpdateListener *UpdateListener) {
4894  SDNode *From = FromN.getNode();
4895  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
4896         "Cannot replace with this method!");
4897  assert(From != To.getNode() && "Cannot replace uses of with self");
4898
4899  // Iterate over all the existing uses of From. New uses will be added
4900  // to the beginning of the use list, which we avoid visiting.
4901  // This specifically avoids visiting uses of From that arise while the
4902  // replacement is happening, because any such uses would be the result
4903  // of CSE: If an existing node looks like From after one of its operands
4904  // is replaced by To, we don't want to replace of all its users with To
4905  // is replaced by To, we don't want to replace all of its users with To
4906  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
4907  while (UI != UE) {
4908    SDNode *User = *UI;
4909
4910    // This node is about to morph, remove its old self from the CSE maps.
4911    RemoveNodeFromCSEMaps(User);
4912
4913    // A user can appear in a use list multiple times, and when this
4914    // happens the uses are usually next to each other in the list.
4915    // To help reduce the number of CSE recomputations, process all
4916    // the uses of this user that we can find this way.
4917    do {
4918      SDUse &Use = UI.getUse();
4919      ++UI;
4920      Use.set(To);
4921    } while (UI != UE && *UI == User);
4922
4923    // Now that we have modified User, add it back to the CSE maps.  If it
4924    // already exists there, recursively merge the results together.
4925    AddModifiedNodeToCSEMaps(User, UpdateListener);
4926  }
4927}
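
// A typical (illustrative) caller-side pattern for the routine above,
// assuming 'OldVal' and 'NewVal' are single-result SDValues in the DAG:
//
//   DAG.ReplaceAllUsesWith(OldVal, NewVal, 0 /*no DAGUpdateListener*/);
//   // Every former user of OldVal now reads NewVal instead; users that
//   // became identical to existing nodes were merged through the CSE maps.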
4928
4929/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
4930/// This can cause recursive merging of nodes in the DAG.
4931///
4932/// This version assumes that for each value of From, there is a
4933/// corresponding value in To in the same position with the same type.
4934///
4935void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To,
4936                                      DAGUpdateListener *UpdateListener) {
4937#ifndef NDEBUG
4938  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
4939    assert((!From->hasAnyUseOfValue(i) ||
4940            From->getValueType(i) == To->getValueType(i)) &&
4941           "Cannot use this version of ReplaceAllUsesWith!");
4942#endif
4943
4944  // Handle the trivial case.
4945  if (From == To)
4946    return;
4947
4948  // Iterate over just the existing users of From. See the comments in
4949  // the ReplaceAllUsesWith above.
4950  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
4951  while (UI != UE) {
4952    SDNode *User = *UI;
4953
4954    // This node is about to morph, remove its old self from the CSE maps.
4955    RemoveNodeFromCSEMaps(User);
4956
4957    // A user can appear in a use list multiple times, and when this
4958    // happens the uses are usually next to each other in the list.
4959    // To help reduce the number of CSE recomputations, process all
4960    // the uses of this user that we can find this way.
4961    do {
4962      SDUse &Use = UI.getUse();
4963      ++UI;
4964      Use.setNode(To);
4965    } while (UI != UE && *UI == User);
4966
4967    // Now that we have modified User, add it back to the CSE maps.  If it
4968    // already exists there, recursively merge the results together.
4969    AddModifiedNodeToCSEMaps(User, UpdateListener);
4970  }
4971}
4972
4973/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
4974/// This can cause recursive merging of nodes in the DAG.
4975///
4976/// This version can replace From with any result values.  To must match the
4977/// number and types of values returned by From.
4978void SelectionDAG::ReplaceAllUsesWith(SDNode *From,
4979                                      const SDValue *To,
4980                                      DAGUpdateListener *UpdateListener) {
4981  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
4982    return ReplaceAllUsesWith(SDValue(From, 0), To[0], UpdateListener);
4983
4984  // Iterate over just the existing users of From. See the comments in
4985  // the ReplaceAllUsesWith above.
4986  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
4987  while (UI != UE) {
4988    SDNode *User = *UI;
4989
4990    // This node is about to morph, remove its old self from the CSE maps.
4991    RemoveNodeFromCSEMaps(User);
4992
4993    // A user can appear in a use list multiple times, and when this
4994    // happens the uses are usually next to each other in the list.
4995    // To help reduce the number of CSE recomputations, process all
4996    // the uses of this user that we can find this way.
4997    do {
4998      SDUse &Use = UI.getUse();
4999      const SDValue &ToOp = To[Use.getResNo()];
5000      ++UI;
5001      Use.set(ToOp);
5002    } while (UI != UE && *UI == User);
5003
5004    // Now that we have modified User, add it back to the CSE maps.  If it
5005    // already exists there, recursively merge the results together.
5006    AddModifiedNodeToCSEMaps(User, UpdateListener);
5007  }
5008}
5009
5010/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5011/// uses of other values produced by From.getNode() alone.  The
5012/// UpdateListener is handled the same way as for ReplaceAllUsesWith.
5013void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
5014                                             DAGUpdateListener *UpdateListener){
5015  // Handle the really simple, really trivial case efficiently.
5016  if (From == To) return;
5017
5018  // Handle the simple, trivial case efficiently.
5019  if (From.getNode()->getNumValues() == 1) {
5020    ReplaceAllUsesWith(From, To, UpdateListener);
5021    return;
5022  }
5023
5024  // Iterate over just the existing users of From. See the comments in
5025  // the ReplaceAllUsesWith above.
5026  SDNode::use_iterator UI = From.getNode()->use_begin(),
5027                       UE = From.getNode()->use_end();
5028  while (UI != UE) {
5029    SDNode *User = *UI;
5030    bool UserRemovedFromCSEMaps = false;
5031
5032    // A user can appear in a use list multiple times, and when this
5033    // happens the uses are usually next to each other in the list.
5034    // To help reduce the number of CSE recomputations, process all
5035    // the uses of this user that we can find this way.
5036    do {
5037      SDUse &Use = UI.getUse();
5038
5039      // Skip uses of different values from the same node.
5040      if (Use.getResNo() != From.getResNo()) {
5041        ++UI;
5042        continue;
5043      }
5044
5045      // If this node hasn't been modified yet, it's still in the CSE maps,
5046      // so remove its old self from the CSE maps.
5047      if (!UserRemovedFromCSEMaps) {
5048        RemoveNodeFromCSEMaps(User);
5049        UserRemovedFromCSEMaps = true;
5050      }
5051
5052      ++UI;
5053      Use.set(To);
5054    } while (UI != UE && *UI == User);
5055
5056    // We are iterating over all uses of the From node, so if a use
5057    // doesn't use the specific value, no changes are made.
5058    if (!UserRemovedFromCSEMaps)
5059      continue;
5060
5061    // Now that we have modified User, add it back to the CSE maps.  If it
5062    // already exists there, recursively merge the results together.
5063    AddModifiedNodeToCSEMaps(User, UpdateListener);
5064  }
5065}
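
// A common (illustrative) use of the routine above: replace only the chain
// result of a load that is being folded away, leaving its value result to be
// handled separately.  'Ld' is assumed to be a LoadSDNode*:
//
//   DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Ld->getChain(), 0);
//   // Users of the load's chain (result 1) now use the incoming chain;
//   // users of the loaded value (result 0) are untouched.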
5066
5067namespace {
5068  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5069  /// to record information about a use.
5070  struct UseMemo {
5071    SDNode *User;
5072    unsigned Index;
5073    SDUse *Use;
5074  };
5075
5076  /// operator< - Sort Memos by User.
5077  bool operator<(const UseMemo &L, const UseMemo &R) {
5078    return (intptr_t)L.User < (intptr_t)R.User;
5079  }
5080}
5081
5082/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
5083/// uses of other values produced by From.getNode() alone.  The same value
5084/// may appear in both the From and To list.  The Deleted vector is
5085/// handled the same way as for ReplaceAllUsesWith.
5086void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
5087                                              const SDValue *To,
5088                                              unsigned Num,
5089                                              DAGUpdateListener *UpdateListener){
5090  // Handle the simple, trivial case efficiently.
5091  if (Num == 1)
5092    return ReplaceAllUsesOfValueWith(*From, *To, UpdateListener);
5093
5094  // Record all the uses ahead of time. This helps with processing
5095  // new uses that are introduced during the
5096  // replacement process.
5097  SmallVector<UseMemo, 4> Uses;
5098  for (unsigned i = 0; i != Num; ++i) {
5099    unsigned FromResNo = From[i].getResNo();
5100    SDNode *FromNode = From[i].getNode();
5101    for (SDNode::use_iterator UI = FromNode->use_begin(),
5102         E = FromNode->use_end(); UI != E; ++UI) {
5103      SDUse &Use = UI.getUse();
5104      if (Use.getResNo() == FromResNo) {
5105        UseMemo Memo = { *UI, i, &Use };
5106        Uses.push_back(Memo);
5107      }
5108    }
5109  }
5110
5111  // Sort the uses, so that all the uses from a given User are together.
5112  std::sort(Uses.begin(), Uses.end());
5113
5114  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
5115       UseIndex != UseIndexEnd; ) {
5116    // We know that this user uses some value of From.  If it is the right
5117    // value, update it.
5118    SDNode *User = Uses[UseIndex].User;
5119
5120    // This node is about to morph, remove its old self from the CSE maps.
5121    RemoveNodeFromCSEMaps(User);
5122
5123    // The Uses array is sorted, so all the uses for a given User
5124    // are next to each other in the list.
5125    // To help reduce the number of CSE recomputations, process all
5126    // the uses of this user that we can find this way.
5127    do {
5128      unsigned i = Uses[UseIndex].Index;
5129      SDUse &Use = *Uses[UseIndex].Use;
5130      ++UseIndex;
5131
5132      Use.set(To[i]);
5133    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
5134
5135    // Now that we have modified User, add it back to the CSE maps.  If it
5136    // already exists there, recursively merge the results together.
5137    AddModifiedNodeToCSEMaps(User, UpdateListener);
5138  }
5139}
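
// A hedged sketch of the parallel-array form above (illustrative only;
// 'OldVal', 'OldChain', 'NewVal' and 'NewChain' are assumed SDValues):
//
//   SDValue From[] = { OldVal, OldChain };
//   SDValue To[]   = { NewVal, NewChain };
//   DAG.ReplaceAllUsesOfValuesWith(From, To, 2, 0);
//   // From[i] is replaced by To[i]; recording all uses up front keeps the
//   // walk stable even though replacements can introduce new uses.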
5140
5141/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
5142/// based on their topological order. It returns the maximum id and a vector
5143/// of the SDNodes* in assigned order by reference.
5144unsigned SelectionDAG::AssignTopologicalOrder() {
5145
5146  unsigned DAGSize = 0;
5147
5148  // SortedPos tracks the progress of the algorithm. Nodes before it are
5149  // sorted, nodes after it are unsorted. When the algorithm completes
5150  // it is at the end of the list.
5151  allnodes_iterator SortedPos = allnodes_begin();
5152
5153  // Visit all the nodes. Move nodes with no operands to the front of
5154  // the list immediately. Annotate nodes that do have operands with their
5155  // operand count. Before we do this, the Node Id fields of the nodes
5156  // may contain arbitrary values. After, the Node Id fields for nodes
5157  // before SortedPos will contain the topological sort index, and the
5158  // Node Id fields for nodes at SortedPos and after will contain the
5159  // count of outstanding operands.
5160  for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
5161    SDNode *N = I++;
5162    unsigned Degree = N->getNumOperands();
5163    if (Degree == 0) {
5164      // A node with no operands, add it to the result array immediately.
5165      N->setNodeId(DAGSize++);
5166      allnodes_iterator Q = N;
5167      if (Q != SortedPos)
5168        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
5169      ++SortedPos;
5170    } else {
5171      // Temporarily use the Node Id as scratch space for the degree count.
5172      N->setNodeId(Degree);
5173    }
5174  }
5175
5176  // Visit all the nodes. As we iterate, move nodes into sorted order,
5177  // such that by the time the end is reached all nodes will be sorted.
5178  for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
5179    SDNode *N = I;
5180    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5181         UI != UE; ++UI) {
5182      SDNode *P = *UI;
5183      unsigned Degree = P->getNodeId();
5184      --Degree;
5185      if (Degree == 0) {
5186        // All of P's operands are sorted, so P may be sorted now.
5187        P->setNodeId(DAGSize++);
5188        if (P != SortedPos)
5189          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
5190        ++SortedPos;
5191      } else {
5192        // Update P's outstanding operand count.
5193        P->setNodeId(Degree);
5194      }
5195    }
5196  }
5197
5198  assert(SortedPos == AllNodes.end() &&
5199         "Topological sort incomplete!");
5200  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
5201         "First node in topological sort is not the entry token!");
5202  assert(AllNodes.front().getNodeId() == 0 &&
5203         "First node in topological sort has non-zero id!");
5204  assert(AllNodes.front().getNumOperands() == 0 &&
5205         "First node in topological sort has operands!");
5206  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
5207         "Last node in topological sort has unexpected id!");
5208  assert(AllNodes.back().use_empty() &&
5209         "Last node in topological sort has users!");
5210  assert(DAGSize == allnodes_size() && "Node count mismatch!");
5211  return DAGSize;
5212}
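
// A minimal consumer-side sketch (illustrative only): after the call, the
// AllNodes list is in topological order, so a forward walk visits every
// node's operands before the node itself.
//
//   unsigned DAGSize = DAG.AssignTopologicalOrder();
//   (void)DAGSize;
//   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
//        E = DAG.allnodes_end(); I != E; ++I) {
//     SDNode *N = I;
//     // N->getNodeId() is its topological index; operands have smaller ids.
//   }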
5213
5214/// AssignOrdering - Assign an order to the SDNode.
5215void SelectionDAG::AssignOrdering(SDNode *SD, unsigned Order) {
5216  assert(SD && "Trying to assign an order to a null node!");
5217  if (Ordering)
5218    Ordering->add(SD, Order);
5219}
5220
5221
5222//===----------------------------------------------------------------------===//
5223//                              SDNode Class
5224//===----------------------------------------------------------------------===//
5225
5226HandleSDNode::~HandleSDNode() {
5227  DropOperands();
5228}
5229
5230GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, const GlobalValue *GA,
5231                                         EVT VT, int64_t o, unsigned char TF)
5232  : SDNode(Opc, DebugLoc::getUnknownLoc(), getSDVTList(VT)),
5233    Offset(o), TargetFlags(TF) {
5234  TheGlobal = const_cast<GlobalValue*>(GA);
5235}
5236
5237MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
5238                     MachineMemOperand *mmo)
5239 : SDNode(Opc, dl, VTs), MemoryVT(memvt), MMO(mmo) {
5240  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile());
5241  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5242  assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5243}
5244
5245MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
5246                     const SDValue *Ops, unsigned NumOps, EVT memvt,
5247                     MachineMemOperand *mmo)
5248   : SDNode(Opc, dl, VTs, Ops, NumOps),
5249     MemoryVT(memvt), MMO(mmo) {
5250  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile());
5251  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5252  assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5253}
5254
5255/// Profile - Gather unique data for the node.
5256///
5257void SDNode::Profile(FoldingSetNodeID &ID) const {
5258  AddNodeIDNode(ID, this);
5259}
5260
5261namespace {
5262  struct EVTArray {
5263    std::vector<EVT> VTs;
5264
5265    EVTArray() {
5266      VTs.reserve(MVT::LAST_VALUETYPE);
5267      for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
5268        VTs.push_back(MVT((MVT::SimpleValueType)i));
5269    }
5270  };
5271}
5272
5273static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
5274static ManagedStatic<EVTArray> SimpleVTArray;
5275static ManagedStatic<sys::SmartMutex<true> > VTMutex;
5276
5277/// getValueTypeList - Return a pointer to the specified value type.
5278///
5279const EVT *SDNode::getValueTypeList(EVT VT) {
5280  if (VT.isExtended()) {
5281    sys::SmartScopedLock<true> Lock(*VTMutex);
5282    return &(*EVTs->insert(VT).first);
5283  } else {
5284    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
5285  }
5286}
5287
5288/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
5289/// indicated value.  This method ignores uses of other values defined by this
5290/// operation.
5291bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
5292  assert(Value < getNumValues() && "Bad value!");
5293
5294  // TODO: Only iterate over uses of a given value of the node
5295  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
5296    if (UI.getUse().getResNo() == Value) {
5297      if (NUses == 0)
5298        return false;
5299      --NUses;
5300    }
5301  }
5302
5303  // Found exactly the right number of uses?
5304  return NUses == 0;
5305}
5306
5307
5308/// hasAnyUseOfValue - Return true if there is any use of the indicated
5309/// value. This method ignores uses of other values defined by this operation.
5310bool SDNode::hasAnyUseOfValue(unsigned Value) const {
5311  assert(Value < getNumValues() && "Bad value!");
5312
5313  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
5314    if (UI.getUse().getResNo() == Value)
5315      return true;
5316
5317  return false;
5318}
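
// An illustrative check using the predicates above: before deleting or
// rewriting a multi-result node, ask whether a particular result is live.
// 'N' is assumed to be an SDNode* with at least two results:
//
//   if (!N->hasAnyUseOfValue(1)) {
//     // Result 1 (e.g. a chain or flag) is dead; only result 0 matters.
//   }
//   bool OneUse = N->hasNUsesOfValue(1, 0); // Exactly one use of result 0?
//   (void)OneUse;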
5319
5320
5321/// isOnlyUserOf - Return true if this node is the only use of N.
5322///
5323bool SDNode::isOnlyUserOf(SDNode *N) const {
5324  bool Seen = false;
5325  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
5326    SDNode *User = *I;
5327    if (User == this)
5328      Seen = true;
5329    else
5330      return false;
5331  }
5332
5333  return Seen;
5334}
5335
5336/// isOperandOf - Return true if this value is an operand of N.
5337///
5338bool SDValue::isOperandOf(SDNode *N) const {
5339  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5340    if (*this == N->getOperand(i))
5341      return true;
5342  return false;
5343}
5344
5345bool SDNode::isOperandOf(SDNode *N) const {
5346  for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
5347    if (this == N->OperandList[i].getNode())
5348      return true;
5349  return false;
5350}
5351
5352/// reachesChainWithoutSideEffects - Return true if this operand (which must
5353/// be a chain) reaches the specified operand without crossing any
5354/// side-effecting instructions.  In practice, this looks through token
5355/// factors and non-volatile loads.  In order to remain efficient, this only
5356/// looks a couple of nodes in, it does not do an exhaustive search.
5357bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
5358                                               unsigned Depth) const {
5359  if (*this == Dest) return true;
5360
5361  // Don't search too deeply; we just want to be able to see through
5362  // TokenFactors, etc.
5363  if (Depth == 0) return false;
5364
5365  // If this is a token factor, all inputs to the TF happen in parallel.  If any
5366  // of the operands of the TF reach dest, then we can do the xform.
5367  if (getOpcode() == ISD::TokenFactor) {
5368    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
5369      if (getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
5370        return true;
5371    return false;
5372  }
5373
5374  // Loads don't have side effects, look through them.
5375  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
5376    if (!Ld->isVolatile())
5377      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
5378  }
5379  return false;
5380}
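
// A hedged sketch of a typical query (illustrative only): before folding a
// value across a chain edge, check that the store's incoming chain reaches
// the load's chain result without intervening side effects.  'St' and 'Ld'
// are assumed to be a StoreSDNode* and a LoadSDNode*:
//
//   if (St->getChain().reachesChainWithoutSideEffects(SDValue(Ld, 1), 2)) {
//     // Only TokenFactors and non-volatile loads lie between them (within
//     // the small search depth), so the fold is safe with respect to chains.
//   }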
5381
5382/// isPredecessorOf - Return true if this node is a predecessor of N. This node
5383/// is either an operand of N or it can be reached by traversing up the operands.
5384/// NOTE: this is an expensive method. Use it carefully.
5385bool SDNode::isPredecessorOf(SDNode *N) const {
5386  SmallPtrSet<SDNode *, 32> Visited;
5387  SmallVector<SDNode *, 16> Worklist;
5388  Worklist.push_back(N);
5389
5390  do {
5391    N = Worklist.pop_back_val();
5392    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
5393      SDNode *Op = N->getOperand(i).getNode();
5394      if (Op == this)
5395        return true;
5396      if (Visited.insert(Op))
5397        Worklist.push_back(Op);
5398    }
5399  } while (!Worklist.empty());
5400
5401  return false;
5402}
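
// An illustrative cycle-avoidance check (not from the original file;
// 'N' and 'Candidate' are assumed SDNode*): before rewriting N so that it
// takes a value produced by Candidate as an operand, make sure Candidate
// does not already (transitively) depend on N:
//
//   if (N->isPredecessorOf(Candidate)) {
//     // Candidate is reachable from N's results; using it as an operand
//     // of N would create a cycle, so skip the transformation.
//   }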
5403
5404uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
5405  assert(Num < NumOperands && "Invalid child # of SDNode!");
5406  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
5407}
5408
5409std::string SDNode::getOperationName(const SelectionDAG *G) const {
5410  switch (getOpcode()) {
5411  default:
5412    if (getOpcode() < ISD::BUILTIN_OP_END)
5413      return "<<Unknown DAG Node>>";
5414    if (isMachineOpcode()) {
5415      if (G)
5416        if (const TargetInstrInfo *TII = G->getTarget().getInstrInfo())
5417          if (getMachineOpcode() < TII->getNumOpcodes())
5418            return TII->get(getMachineOpcode()).getName();
5419      return "<<Unknown Machine Node>>";
5420    }
5421    if (G) {
5422      const TargetLowering &TLI = G->getTargetLoweringInfo();
5423      const char *Name = TLI.getTargetNodeName(getOpcode());
5424      if (Name) return Name;
5425      return "<<Unknown Target Node>>";
5426    }
5427    return "<<Unknown Node>>";
5428
5429#ifndef NDEBUG
5430  case ISD::DELETED_NODE:
5431    return "<<Deleted Node!>>";
5432#endif
5433  case ISD::PREFETCH:      return "Prefetch";
5434  case ISD::MEMBARRIER:    return "MemBarrier";
5435  case ISD::ATOMIC_CMP_SWAP:    return "AtomicCmpSwap";
5436  case ISD::ATOMIC_SWAP:        return "AtomicSwap";
5437  case ISD::ATOMIC_LOAD_ADD:    return "AtomicLoadAdd";
5438  case ISD::ATOMIC_LOAD_SUB:    return "AtomicLoadSub";
5439  case ISD::ATOMIC_LOAD_AND:    return "AtomicLoadAnd";
5440  case ISD::ATOMIC_LOAD_OR:     return "AtomicLoadOr";
5441  case ISD::ATOMIC_LOAD_XOR:    return "AtomicLoadXor";
5442  case ISD::ATOMIC_LOAD_NAND:   return "AtomicLoadNand";
5443  case ISD::ATOMIC_LOAD_MIN:    return "AtomicLoadMin";
5444  case ISD::ATOMIC_LOAD_MAX:    return "AtomicLoadMax";
5445  case ISD::ATOMIC_LOAD_UMIN:   return "AtomicLoadUMin";
5446  case ISD::ATOMIC_LOAD_UMAX:   return "AtomicLoadUMax";
5447  case ISD::PCMARKER:      return "PCMarker";
5448  case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
5449  case ISD::SRCVALUE:      return "SrcValue";
5450  case ISD::EntryToken:    return "EntryToken";
5451  case ISD::TokenFactor:   return "TokenFactor";
5452  case ISD::AssertSext:    return "AssertSext";
5453  case ISD::AssertZext:    return "AssertZext";
5454
5455  case ISD::BasicBlock:    return "BasicBlock";
5456  case ISD::VALUETYPE:     return "ValueType";
5457  case ISD::Register:      return "Register";
5458
5459  case ISD::Constant:      return "Constant";
5460  case ISD::ConstantFP:    return "ConstantFP";
5461  case ISD::GlobalAddress: return "GlobalAddress";
5462  case ISD::GlobalTLSAddress: return "GlobalTLSAddress";
5463  case ISD::FrameIndex:    return "FrameIndex";
5464  case ISD::JumpTable:     return "JumpTable";
5465  case ISD::GLOBAL_OFFSET_TABLE: return "GLOBAL_OFFSET_TABLE";
5466  case ISD::RETURNADDR: return "RETURNADDR";
5467  case ISD::FRAMEADDR: return "FRAMEADDR";
5468  case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
5469  case ISD::EXCEPTIONADDR: return "EXCEPTIONADDR";
5470  case ISD::LSDAADDR: return "LSDAADDR";
5471  case ISD::EHSELECTION: return "EHSELECTION";
5472  case ISD::EH_RETURN: return "EH_RETURN";
5473  case ISD::ConstantPool:  return "ConstantPool";
5474  case ISD::ExternalSymbol: return "ExternalSymbol";
5475  case ISD::BlockAddress:  return "BlockAddress";
5476  case ISD::INTRINSIC_WO_CHAIN:
5477  case ISD::INTRINSIC_VOID:
5478  case ISD::INTRINSIC_W_CHAIN: {
5479    unsigned OpNo = getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
5480    unsigned IID = cast<ConstantSDNode>(getOperand(OpNo))->getZExtValue();
5481    if (IID < Intrinsic::num_intrinsics)
5482      return Intrinsic::getName((Intrinsic::ID)IID);
5483    else if (const TargetIntrinsicInfo *TII = G->getTarget().getIntrinsicInfo())
5484      return TII->getName(IID);
5485    llvm_unreachable("Invalid intrinsic ID");
5486  }
5487
5488  case ISD::BUILD_VECTOR:   return "BUILD_VECTOR";
5489  case ISD::TargetConstant: return "TargetConstant";
5490  case ISD::TargetConstantFP:return "TargetConstantFP";
5491  case ISD::TargetGlobalAddress: return "TargetGlobalAddress";
5492  case ISD::TargetGlobalTLSAddress: return "TargetGlobalTLSAddress";
5493  case ISD::TargetFrameIndex: return "TargetFrameIndex";
5494  case ISD::TargetJumpTable:  return "TargetJumpTable";
5495  case ISD::TargetConstantPool:  return "TargetConstantPool";
5496  case ISD::TargetExternalSymbol: return "TargetExternalSymbol";
5497  case ISD::TargetBlockAddress: return "TargetBlockAddress";
5498
5499  case ISD::CopyToReg:     return "CopyToReg";
5500  case ISD::CopyFromReg:   return "CopyFromReg";
5501  case ISD::UNDEF:         return "undef";
5502  case ISD::MERGE_VALUES:  return "merge_values";
5503  case ISD::INLINEASM:     return "inlineasm";
5504  case ISD::EH_LABEL:      return "eh_label";
5505  case ISD::HANDLENODE:    return "handlenode";
5506
5507  // Unary operators
5508  case ISD::FABS:   return "fabs";
5509  case ISD::FNEG:   return "fneg";
5510  case ISD::FSQRT:  return "fsqrt";
5511  case ISD::FSIN:   return "fsin";
5512  case ISD::FCOS:   return "fcos";
5513  case ISD::FPOWI:  return "fpowi";
5514  case ISD::FPOW:   return "fpow";
5515  case ISD::FTRUNC: return "ftrunc";
5516  case ISD::FFLOOR: return "ffloor";
5517  case ISD::FCEIL:  return "fceil";
5518  case ISD::FRINT:  return "frint";
5519  case ISD::FNEARBYINT: return "fnearbyint";
5520
5521  // Binary operators
5522  case ISD::ADD:    return "add";
5523  case ISD::SUB:    return "sub";
5524  case ISD::MUL:    return "mul";
5525  case ISD::MULHU:  return "mulhu";
5526  case ISD::MULHS:  return "mulhs";
5527  case ISD::SDIV:   return "sdiv";
5528  case ISD::UDIV:   return "udiv";
5529  case ISD::SREM:   return "srem";
5530  case ISD::UREM:   return "urem";
5531  case ISD::SMUL_LOHI:  return "smul_lohi";
5532  case ISD::UMUL_LOHI:  return "umul_lohi";
5533  case ISD::SDIVREM:    return "sdivrem";
5534  case ISD::UDIVREM:    return "udivrem";
5535  case ISD::AND:    return "and";
5536  case ISD::OR:     return "or";
5537  case ISD::XOR:    return "xor";
5538  case ISD::SHL:    return "shl";
5539  case ISD::SRA:    return "sra";
5540  case ISD::SRL:    return "srl";
5541  case ISD::ROTL:   return "rotl";
5542  case ISD::ROTR:   return "rotr";
5543  case ISD::FADD:   return "fadd";
5544  case ISD::FSUB:   return "fsub";
5545  case ISD::FMUL:   return "fmul";
5546  case ISD::FDIV:   return "fdiv";
5547  case ISD::FREM:   return "frem";
5548  case ISD::FCOPYSIGN: return "fcopysign";
5549  case ISD::FGETSIGN:  return "fgetsign";
5550
5551  case ISD::SETCC:       return "setcc";
5552  case ISD::VSETCC:      return "vsetcc";
5553  case ISD::SELECT:      return "select";
5554  case ISD::SELECT_CC:   return "select_cc";
5555  case ISD::INSERT_VECTOR_ELT:   return "insert_vector_elt";
5556  case ISD::EXTRACT_VECTOR_ELT:  return "extract_vector_elt";
5557  case ISD::CONCAT_VECTORS:      return "concat_vectors";
5558  case ISD::EXTRACT_SUBVECTOR:   return "extract_subvector";
5559  case ISD::SCALAR_TO_VECTOR:    return "scalar_to_vector";
5560  case ISD::VECTOR_SHUFFLE:      return "vector_shuffle";
5561  case ISD::CARRY_FALSE:         return "carry_false";
5562  case ISD::ADDC:        return "addc";
5563  case ISD::ADDE:        return "adde";
5564  case ISD::SADDO:       return "saddo";
5565  case ISD::UADDO:       return "uaddo";
5566  case ISD::SSUBO:       return "ssubo";
5567  case ISD::USUBO:       return "usubo";
5568  case ISD::SMULO:       return "smulo";
5569  case ISD::UMULO:       return "umulo";
5570  case ISD::SUBC:        return "subc";
5571  case ISD::SUBE:        return "sube";
5572  case ISD::SHL_PARTS:   return "shl_parts";
5573  case ISD::SRA_PARTS:   return "sra_parts";
5574  case ISD::SRL_PARTS:   return "srl_parts";
5575
5576  // Conversion operators.
5577  case ISD::SIGN_EXTEND: return "sign_extend";
5578  case ISD::ZERO_EXTEND: return "zero_extend";
5579  case ISD::ANY_EXTEND:  return "any_extend";
5580  case ISD::SIGN_EXTEND_INREG: return "sign_extend_inreg";
5581  case ISD::TRUNCATE:    return "truncate";
5582  case ISD::FP_ROUND:    return "fp_round";
5583  case ISD::FLT_ROUNDS_: return "flt_rounds";
5584  case ISD::FP_ROUND_INREG: return "fp_round_inreg";
5585  case ISD::FP_EXTEND:   return "fp_extend";
5586
5587  case ISD::SINT_TO_FP:  return "sint_to_fp";
5588  case ISD::UINT_TO_FP:  return "uint_to_fp";
5589  case ISD::FP_TO_SINT:  return "fp_to_sint";
5590  case ISD::FP_TO_UINT:  return "fp_to_uint";
5591  case ISD::BIT_CONVERT: return "bit_convert";
5592
5593  case ISD::CONVERT_RNDSAT: {
5594    switch (cast<CvtRndSatSDNode>(this)->getCvtCode()) {
5595    default: llvm_unreachable("Unknown cvt code!");
5596    case ISD::CVT_FF:  return "cvt_ff";
5597    case ISD::CVT_FS:  return "cvt_fs";
5598    case ISD::CVT_FU:  return "cvt_fu";
5599    case ISD::CVT_SF:  return "cvt_sf";
5600    case ISD::CVT_UF:  return "cvt_uf";
5601    case ISD::CVT_SS:  return "cvt_ss";
5602    case ISD::CVT_SU:  return "cvt_su";
5603    case ISD::CVT_US:  return "cvt_us";
5604    case ISD::CVT_UU:  return "cvt_uu";
5605    }
5606  }
5607
5608    // Control flow instructions
5609  case ISD::BR:      return "br";
5610  case ISD::BRIND:   return "brind";
5611  case ISD::BR_JT:   return "br_jt";
5612  case ISD::BRCOND:  return "brcond";
5613  case ISD::BR_CC:   return "br_cc";
5614  case ISD::CALLSEQ_START:  return "callseq_start";
5615  case ISD::CALLSEQ_END:    return "callseq_end";
5616
5617    // Other operators
5618  case ISD::LOAD:               return "load";
5619  case ISD::STORE:              return "store";
5620  case ISD::VAARG:              return "vaarg";
5621  case ISD::VACOPY:             return "vacopy";
5622  case ISD::VAEND:              return "vaend";
5623  case ISD::VASTART:            return "vastart";
5624  case ISD::DYNAMIC_STACKALLOC: return "dynamic_stackalloc";
5625  case ISD::EXTRACT_ELEMENT:    return "extract_element";
5626  case ISD::BUILD_PAIR:         return "build_pair";
5627  case ISD::STACKSAVE:          return "stacksave";
5628  case ISD::STACKRESTORE:       return "stackrestore";
5629  case ISD::TRAP:               return "trap";
5630
5631  // Bit manipulation
5632  case ISD::BSWAP:   return "bswap";
5633  case ISD::CTPOP:   return "ctpop";
5634  case ISD::CTTZ:    return "cttz";
5635  case ISD::CTLZ:    return "ctlz";
5636
5637  // Trampolines
5638  case ISD::TRAMPOLINE: return "trampoline";
5639
5640  case ISD::CONDCODE:
5641    switch (cast<CondCodeSDNode>(this)->get()) {
5642    default: llvm_unreachable("Unknown setcc condition!");
5643    case ISD::SETOEQ:  return "setoeq";
5644    case ISD::SETOGT:  return "setogt";
5645    case ISD::SETOGE:  return "setoge";
5646    case ISD::SETOLT:  return "setolt";
5647    case ISD::SETOLE:  return "setole";
5648    case ISD::SETONE:  return "setone";
5649
5650    case ISD::SETO:    return "seto";
5651    case ISD::SETUO:   return "setuo";
5652    case ISD::SETUEQ:  return "setueq";
5653    case ISD::SETUGT:  return "setugt";
5654    case ISD::SETUGE:  return "setuge";
5655    case ISD::SETULT:  return "setult";
5656    case ISD::SETULE:  return "setule";
5657    case ISD::SETUNE:  return "setune";
5658
5659    case ISD::SETEQ:   return "seteq";
5660    case ISD::SETGT:   return "setgt";
5661    case ISD::SETGE:   return "setge";
5662    case ISD::SETLT:   return "setlt";
5663    case ISD::SETLE:   return "setle";
5664    case ISD::SETNE:   return "setne";
5665    }
5666  }
5667}
5668
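/// getIndexedModeName - Return a tag such as "<pre-inc>" naming the given
/// indexed addressing mode, or an empty string for unindexed accesses.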
5669const char *SDNode::getIndexedModeName(ISD::MemIndexedMode AM) {
5670  switch (AM) {
5671  default:
5672    return "";
5673  case ISD::PRE_INC:
5674    return "<pre-inc>";
5675  case ISD::PRE_DEC:
5676    return "<pre-dec>";
5677  case ISD::POST_INC:
5678    return "<post-inc>";
5679  case ISD::POST_DEC:
5680    return "<post-dec>";
5681  }
5682}
5683
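/// getArgFlagsString - Build a human-readable string of the form "< ... >"
/// listing the attribute flags, alignments, and byval size recorded for this
/// argument, for use in debug output.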
5684std::string ISD::ArgFlagsTy::getArgFlagsString() {
5685  std::string S = "< ";
5686
5687  if (isZExt())
5688    S += "zext ";
5689  if (isSExt())
5690    S += "sext ";
5691  if (isInReg())
5692    S += "inreg ";
5693  if (isSRet())
5694    S += "sret ";
5695  if (isByVal())
5696    S += "byval ";
5697  if (isNest())
5698    S += "nest ";
5699  if (getByValAlign())
5700    S += "byval-align:" + utostr(getByValAlign()) + " ";
5701  if (getOrigAlign())
5702    S += "orig-align:" + utostr(getOrigAlign()) + " ";
5703  if (getByValSize())
5704    S += "byval-size:" + utostr(getByValSize()) + " ";
5705  return S + ">";
5706}
5707
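/// dump - Print a one-line summary of this node to stderr.  The overload
/// taking a SelectionDAG can print target-specific details, such as physical
/// register and target intrinsic names, symbolically.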
5708void SDNode::dump() const { dump(0); }
5709void SDNode::dump(const SelectionDAG *G) const {
5710  print(errs(), G);
5711}
5712
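/// print_types - Print this node's address and result value types (printing
/// "ch" for MVT::Other chain results), followed by its operation name.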
5713void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
5714  OS << (void*)this << ": ";
5715
5716  for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
5717    if (i) OS << ",";
5718    if (getValueType(i) == MVT::Other)
5719      OS << "ch";
5720    else
5721      OS << getValueType(i).getEVTString();
5722  }
5723  OS << " = " << getOperationName(G);
5724}
5725
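/// print_details - Print the node-specific payload: memory operands, shuffle
/// masks, constant values, address operands, register names, symbols, value
/// types, and load/store attributes such as extension kind and indexed mode.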
5726void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
5727  if (const MachineSDNode *MN = dyn_cast<MachineSDNode>(this)) {
5728    if (!MN->memoperands_empty()) {
5729      OS << "<";
5730      OS << "Mem:";
5731      for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
5732           e = MN->memoperands_end(); i != e; ++i) {
5733        OS << **i;
5734        if (next(i) != e)
5735          OS << " ";
5736      }
5737      OS << ">";
5738    }
5739  } else if (const ShuffleVectorSDNode *SVN =
5740               dyn_cast<ShuffleVectorSDNode>(this)) {
5741    OS << "<";
5742    for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) {
5743      int Idx = SVN->getMaskElt(i);
5744      if (i) OS << ",";
5745      if (Idx < 0)
5746        OS << "u";
5747      else
5748        OS << Idx;
5749    }
5750    OS << ">";
5751  } else if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
5752    OS << '<' << CSDN->getAPIntValue() << '>';
5753  } else if (const ConstantFPSDNode *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
5754    if (&CSDN->getValueAPF().getSemantics() == &APFloat::IEEEsingle)
5755      OS << '<' << CSDN->getValueAPF().convertToFloat() << '>';
5756    else if (&CSDN->getValueAPF().getSemantics() == &APFloat::IEEEdouble)
5757      OS << '<' << CSDN->getValueAPF().convertToDouble() << '>';
5758    else {
5759      OS << "<APFloat(";
5760      CSDN->getValueAPF().bitcastToAPInt().dump();
5761      OS << ")>";
5762    }
5763  } else if (const GlobalAddressSDNode *GADN =
5764             dyn_cast<GlobalAddressSDNode>(this)) {
5765    int64_t offset = GADN->getOffset();
5766    OS << '<';
5767    WriteAsOperand(OS, GADN->getGlobal());
5768    OS << '>';
5769    if (offset > 0)
5770      OS << " + " << offset;
5771    else
5772      OS << " " << offset;
5773    if (unsigned int TF = GADN->getTargetFlags())
5774      OS << " [TF=" << TF << ']';
5775  } else if (const FrameIndexSDNode *FIDN = dyn_cast<FrameIndexSDNode>(this)) {
5776    OS << "<" << FIDN->getIndex() << ">";
5777  } else if (const JumpTableSDNode *JTDN = dyn_cast<JumpTableSDNode>(this)) {
5778    OS << "<" << JTDN->getIndex() << ">";
5779    if (unsigned int TF = JTDN->getTargetFlags())
5780      OS << " [TF=" << TF << ']';
5781  } else if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(this)){
5782    int offset = CP->getOffset();
5783    if (CP->isMachineConstantPoolEntry())
5784      OS << "<" << *CP->getMachineCPVal() << ">";
5785    else
5786      OS << "<" << *CP->getConstVal() << ">";
5787    if (offset > 0)
5788      OS << " + " << offset;
5789    else
5790      OS << " " << offset;
5791    if (unsigned int TF = CP->getTargetFlags())
5792      OS << " [TF=" << TF << ']';
5793  } else if (const BasicBlockSDNode *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
5794    OS << "<";
5795    const Value *LBB = (const Value*)BBDN->getBasicBlock()->getBasicBlock();
5796    if (LBB)
5797      OS << LBB->getName() << " ";
5798    OS << (const void*)BBDN->getBasicBlock() << ">";
5799  } else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
5800    if (G && R->getReg() &&
5801        TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
5802      OS << " %" << G->getTarget().getRegisterInfo()->getName(R->getReg());
5803    } else {
5804      OS << " %reg" << R->getReg();
5805    }
5806  } else if (const ExternalSymbolSDNode *ES =
5807             dyn_cast<ExternalSymbolSDNode>(this)) {
5808    OS << "'" << ES->getSymbol() << "'";
5809    if (unsigned int TF = ES->getTargetFlags())
5810      OS << " [TF=" << TF << ']';
5811  } else if (const SrcValueSDNode *M = dyn_cast<SrcValueSDNode>(this)) {
5812    if (M->getValue())
5813      OS << "<" << M->getValue() << ">";
5814    else
5815      OS << "<null>";
5816  } else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
5817    OS << ":" << N->getVT().getEVTString();
5818  }
5819  else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
5820    OS << "<" << *LD->getMemOperand();
5821
5822    bool doExt = true;
5823    switch (LD->getExtensionType()) {
5824    default: doExt = false; break;
5825    case ISD::EXTLOAD: OS << ", anyext"; break;
5826    case ISD::SEXTLOAD: OS << ", sext"; break;
5827    case ISD::ZEXTLOAD: OS << ", zext"; break;
5828    }
5829    if (doExt)
5830      OS << " from " << LD->getMemoryVT().getEVTString();
5831
5832    const char *AM = getIndexedModeName(LD->getAddressingMode());
5833    if (*AM)
5834      OS << ", " << AM;
5835
5836    OS << ">";
5837  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
5838    OS << "<" << *ST->getMemOperand();
5839
5840    if (ST->isTruncatingStore())
5841      OS << ", trunc to " << ST->getMemoryVT().getEVTString();
5842
5843    const char *AM = getIndexedModeName(ST->getAddressingMode());
5844    if (*AM)
5845      OS << ", " << AM;
5846
5847    OS << ">";
5848  } else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
5849    OS << "<" << *M->getMemOperand() << ">";
5850  } else if (const BlockAddressSDNode *BA =
5851               dyn_cast<BlockAddressSDNode>(this)) {
5852    OS << "<";
5853    WriteAsOperand(OS, BA->getBlockAddress()->getFunction(), false);
5854    OS << ", ";
5855    WriteAsOperand(OS, BA->getBlockAddress()->getBasicBlock(), false);
5856    OS << ">";
5857    if (unsigned int TF = BA->getTargetFlags())
5858      OS << " [TF=" << TF << ']';
5859  }
5860}
5861
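/// print - Print this node's types, the address (and result number) of each
/// operand, and its node-specific details on a single line.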
5862void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
5863  print_types(OS, G);
5864  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
5865    if (i) OS << ", "; else OS << " ";
5866    OS << (void*)getOperand(i).getNode();
5867    if (unsigned RN = getOperand(i).getResNo())
5868      OS << ":" << RN;
5869  }
5870  print_details(OS, G);
5871}
5872
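/// DumpNodes - Print N after recursively printing its single-use operands,
/// indenting two spaces per level; multiply-used operands are listed only by
/// address as "<multiple use>".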
5873static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
5874  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5875    if (N->getOperand(i).getNode()->hasOneUse())
5876      DumpNodes(N->getOperand(i).getNode(), indent+2, G);
5877    else
5878      errs() << "\n" << std::string(indent+2, ' ')
5879             << (void*)N->getOperand(i).getNode() << ": <multiple use>";
5880
5881
5882  errs() << "\n";
5883  errs().indent(indent);
5884  N->dump(G);
5885}
5886
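/// UnrollVectorOp - Expand the vector operation N into a sequence of scalar
/// operations, one per element.  If ResNE is zero the whole vector is
/// unrolled; otherwise at most ResNE elements are computed and any remaining
/// lanes of the ResNE-wide result are undef.  The scalar results are
/// reassembled with a BUILD_VECTOR node.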
5887SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
5888  assert(N->getNumValues() == 1 &&
5889         "Can't unroll a vector with multiple results!");
5890
5891  EVT VT = N->getValueType(0);
5892  unsigned NE = VT.getVectorNumElements();
5893  EVT EltVT = VT.getVectorElementType();
5894  DebugLoc dl = N->getDebugLoc();
5895
5896  SmallVector<SDValue, 8> Scalars;
5897  SmallVector<SDValue, 4> Operands(N->getNumOperands());
5898
5899  // If ResNE is 0, fully unroll the vector op.
5900  if (ResNE == 0)
5901    ResNE = NE;
5902  else if (NE > ResNE)
5903    NE = ResNE;
5904
5905  unsigned i;
5906  for (i = 0; i != NE; ++i) {
5907    for (unsigned j = 0; j != N->getNumOperands(); ++j) {
5908      SDValue Operand = N->getOperand(j);
5909      EVT OperandVT = Operand.getValueType();
5910      if (OperandVT.isVector()) {
5911        // A vector operand; extract a single element.
5912        EVT OperandEltVT = OperandVT.getVectorElementType();
5913        Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
5914                              OperandEltVT,
5915                              Operand,
5916                              getConstant(i, MVT::i32));
5917      } else {
5918        // A scalar operand; just use it as is.
5919        Operands[j] = Operand;
5920      }
5921    }
5922
5923    switch (N->getOpcode()) {
5924    default:
5925      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
5926                                &Operands[0], Operands.size()));
5927      break;
5928    case ISD::SHL:
5929    case ISD::SRA:
5930    case ISD::SRL:
5931    case ISD::ROTL:
5932    case ISD::ROTR:
5933      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
5934                                getShiftAmountOperand(Operands[1])));
5935      break;
5936    }
5937  }
5938
5939  for (; i < ResNE; ++i)
5940    Scalars.push_back(getUNDEF(EltVT));
5941
5942  return getNode(ISD::BUILD_VECTOR, dl,
5943                 EVT::getVectorVT(*getContext(), EltVT, ResNE),
5944                 &Scalars[0], Scalars.size());
5945}
5946
5947
5948/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
5949/// location that is 'Dist' units away from the location that the 'Base' load
5950/// is loading from.
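/// For example, with Bytes == 4 and Dist == 1 this returns true when LD reads
/// the four bytes starting four bytes past the address that Base reads from.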
5951bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
5952                                     unsigned Bytes, int Dist) const {
5953  if (LD->getChain() != Base->getChain())
5954    return false;
5955  EVT VT = LD->getValueType(0);
5956  if (VT.getSizeInBits() / 8 != Bytes)
5957    return false;
5958
5959  SDValue Loc = LD->getOperand(1);
5960  SDValue BaseLoc = Base->getOperand(1);
5961  if (Loc.getOpcode() == ISD::FrameIndex) {
5962    if (BaseLoc.getOpcode() != ISD::FrameIndex)
5963      return false;
5964    const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
5965    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
5966    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
5967    int FS  = MFI->getObjectSize(FI);
5968    int BFS = MFI->getObjectSize(BFI);
5969    if (FS != BFS || FS != (int)Bytes) return false;
5970    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
5971  }
5972  if (Loc.getOpcode() == ISD::ADD && Loc.getOperand(0) == BaseLoc) {
5973    ConstantSDNode *V = dyn_cast<ConstantSDNode>(Loc.getOperand(1));
5974    if (V && (V->getSExtValue() == Dist*Bytes))
5975      return true;
5976  }
5977
5978  GlobalValue *GV1 = NULL;
5979  GlobalValue *GV2 = NULL;
5980  int64_t Offset1 = 0;
5981  int64_t Offset2 = 0;
5982  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
5983  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
5984  if (isGA1 && isGA2 && GV1 == GV2)
5985    return Offset1 == (Offset2 + Dist*Bytes);
5986  return false;
5987}
5988
5989
5990/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
5991/// it cannot be inferred.
5992unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
5993  // If this is a GlobalAddress + cst, return the alignment.
5994  GlobalValue *GV;
5995  int64_t GVOffset = 0;
5996  if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset))
5997    return MinAlign(GV->getAlignment(), GVOffset);
5998
5999  // If this is a direct reference to a stack slot, use information about the
6000  // stack slot's alignment.
6001  int FrameIdx = 1 << 31;  // Sentinel: no frame index found yet.
6002  int64_t FrameOffset = 0;
6003  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6004    FrameIdx = FI->getIndex();
6005  } else if (Ptr.getOpcode() == ISD::ADD &&
6006             isa<ConstantSDNode>(Ptr.getOperand(1)) &&
6007             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6008    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6009    FrameOffset = Ptr.getConstantOperandVal(1);
6010  }
6011
6012  if (FrameIdx != (1 << 31)) {
6013    // FIXME: Handle FI+CST.
6014    const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6015    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6016                                    FrameOffset);
6017    if (MFI.isFixedObjectIndex(FrameIdx)) {
6018      int64_t ObjectOffset = MFI.getObjectOffset(FrameIdx) + FrameOffset;
6019
6020      // The alignment of the frame index can be determined from its offset from
6021      // the incoming frame position.  If the frame object is at offset 32 and
6022      // the stack is guaranteed to be 16-byte aligned, then we know that the
6023      // object is 16-byte aligned.
6024      unsigned StackAlign = getTarget().getFrameInfo()->getStackAlignment();
6025      unsigned Align = MinAlign(ObjectOffset, StackAlign);
6026
6027      // Finally, the frame object itself may have a known alignment.  Factor
6028      // the alignment + offset into a new alignment.  For example, if we know
6029      // the FI is 8 byte aligned, but the pointer is 4 off, we really have a
6030      // 4-byte alignment of the resultant pointer.  Likewise align 4 + 4-byte
6031      // offset = 4-byte alignment, align 4 + 1-byte offset = align 1, etc.
6032      return std::max(Align, FIInfoAlign);
6033    }
6034    return FIInfoAlign;
6035  }
6036
6037  return 0;
6038}
6039
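/// dump - Print every node in this DAG to stderr.  Multiply-used nodes and
/// the root are printed as the tops of indented trees; single-use nodes
/// appear beneath their user.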
6040void SelectionDAG::dump() const {
6041  errs() << "SelectionDAG has " << AllNodes.size() << " nodes:";
6042
6043  for (allnodes_const_iterator I = allnodes_begin(), E = allnodes_end();
6044       I != E; ++I) {
6045    const SDNode *N = I;
6046    if (!N->hasOneUse() && N != getRoot().getNode())
6047      DumpNodes(N, 2, this);
6048  }
6049
6050  if (getRoot().getNode()) DumpNodes(getRoot().getNode(), 2, this);
6051
6052  errs() << "\n\n";
6053}
6054
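/// printr - Print this node's types and details without its operand list.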
6055void SDNode::printr(raw_ostream &OS, const SelectionDAG *G) const {
6056  print_types(OS, G);
6057  print_details(OS, G);
6058}
6059
6060typedef SmallPtrSet<const SDNode *, 128> VisitedSDNodeSet;
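/// DumpNodesr - Print N and then its operands: operands that are leaves are
/// printed inline, the rest by address, and each operand that has operands of
/// its own is subsequently dumped on its own line(s).  The 'once' set
/// guarantees each node is printed at most once.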
6061static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
6062                       const SelectionDAG *G, VisitedSDNodeSet &once) {
6063  if (!once.insert(N))          // If we've been here before, return now.
6064    return;
6065  // Dump the current SDNode, but don't end the line yet.
6066  OS << std::string(indent, ' ');
6067  N->printr(OS, G);
6068  // Having printed this SDNode, walk the children:
6069  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
6070    const SDNode *child = N->getOperand(i).getNode();
6071    if (i) OS << ",";
6072    OS << " ";
6073    if (child->getNumOperands() == 0) {
6074      // This child has no grandchildren; print it inline right here.
6075      child->printr(OS, G);
6076      once.insert(child);
6077    } else {          // Just the address.  FIXME: also print the child's opcode
6078      OS << (void*)child;
6079      if (unsigned RN = N->getOperand(i).getResNo())
6080        OS << ":" << RN;
6081    }
6082  }
6083  OS << "\n";
6084  // Dump children that have grandchildren on their own line(s).
6085  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
6086    const SDNode *child = N->getOperand(i).getNode();
6087    DumpNodesr(OS, child, indent+2, G, once);
6088  }
6089}
6090
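/// dumpr - Print this node and, recursively, all of its operands to stderr.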
6091void SDNode::dumpr() const {
6092  VisitedSDNodeSet once;
6093  DumpNodesr(errs(), this, 0, 0, once);
6094}
6095
6096void SDNode::dumpr(const SelectionDAG *G) const {
6097  VisitedSDNodeSet once;
6098  DumpNodesr(errs(), this, 0, G, once);
6099}
6100
6101
6102// getAddressSpace - Return the address space this GlobalAddress belongs to.
6103unsigned GlobalAddressSDNode::getAddressSpace() const {
6104  return getGlobal()->getType()->getAddressSpace();
6105}
6106
6107
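// getType - Return the type of the constant this entry refers to, whether it
// is a target-specific machine constant pool value or an ordinary Constant.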
6108const Type *ConstantPoolSDNode::getType() const {
6109  if (isMachineConstantPoolEntry())
6110    return Val.MachineCPVal->getType();
6111  return Val.ConstVal->getType();
6112}
6113
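/// isConstantSplat - Check whether this BUILD_VECTOR consists only of
/// constants and undefs, and if so find the smallest element width (at least
/// MinSplatBits) at which its bits form a splat.  On success the splat value,
/// the undef bits, the splat element size, and an any-undef flag are returned
/// through the out-parameters.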
6114bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6115                                        APInt &SplatUndef,
6116                                        unsigned &SplatBitSize,
6117                                        bool &HasAnyUndefs,
6118                                        unsigned MinSplatBits,
6119                                        bool isBigEndian) {
6120  EVT VT = getValueType(0);
6121  assert(VT.isVector() && "Expected a vector type");
6122  unsigned sz = VT.getSizeInBits();
6123  if (MinSplatBits > sz)
6124    return false;
6125
6126  SplatValue = APInt(sz, 0);
6127  SplatUndef = APInt(sz, 0);
6128
6129  // Get the bits.  Bits with undefined values (when the corresponding element
6130  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6131  // in SplatValue.  If any of the values are not constant, give up and return
6132  // false.
6133  unsigned int nOps = getNumOperands();
6134  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6135  unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6136
6137  for (unsigned j = 0; j < nOps; ++j) {
6138    unsigned i = isBigEndian ? nOps-1-j : j;
6139    SDValue OpVal = getOperand(i);
6140    unsigned BitPos = j * EltBitSize;
6141
6142    if (OpVal.getOpcode() == ISD::UNDEF)
6143      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6144    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6145      SplatValue |= (APInt(CN->getAPIntValue()).zextOrTrunc(EltBitSize).
6146                     zextOrTrunc(sz) << BitPos);
6147    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6148      SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
6149    else
6150      return false;
6151  }
6152
6153  // The build_vector is all constants or undefs.  Find the smallest element
6154  // size that splats the vector.
6155
6156  HasAnyUndefs = (SplatUndef != 0);
6157  while (sz > 8) {
6159    unsigned HalfSize = sz / 2;
6160    APInt HighValue = APInt(SplatValue).lshr(HalfSize).trunc(HalfSize);
6161    APInt LowValue = APInt(SplatValue).trunc(HalfSize);
6162    APInt HighUndef = APInt(SplatUndef).lshr(HalfSize).trunc(HalfSize);
6163    APInt LowUndef = APInt(SplatUndef).trunc(HalfSize);
6164
6165    // If the two halves do not match (ignoring undef bits), stop here.
6166    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6167        MinSplatBits > HalfSize)
6168      break;
6169
6170    SplatValue = HighValue | LowValue;
6171    SplatUndef = HighUndef & LowUndef;
6172
6173    sz = HalfSize;
6174  }
6175
6176  SplatBitSize = sz;
6177  return true;
6178}
6179
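/// isSplatMask - Return true if every non-undef element of Mask selects the
/// same source element, i.e. the shuffle is a splat.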
6180bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6181  // Find the first non-undef value in the shuffle mask.
6182  unsigned i, e;
6183  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6184    /* search */;
6185
6186  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6187
6188  // Make sure all remaining elements are either undef or the same as the first
6189  // non-undef value.
6190  for (int Idx = Mask[i]; i != e; ++i)
6191    if (Mask[i] >= 0 && Mask[i] != Idx)
6192      return false;
6193  return true;
6194}
6195