DAGCombiner.cpp revision 6726b6d75a8b679068a58cb954ba97cf9d1690ba
1//===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass combines dag nodes to form fewer, simpler DAG nodes.  It can be run
11// both before and after the DAG is legalized.
12//
13// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
14// primarily intended to handle simplification opportunities that are implicit
15// in the LLVM IR and exposed by the various codegen lowering phases.
16//
17//===----------------------------------------------------------------------===//
18
19#define DEBUG_TYPE "dagcombine"
20#include "llvm/CodeGen/SelectionDAG.h"
21#include "llvm/DerivedTypes.h"
22#include "llvm/LLVMContext.h"
23#include "llvm/CodeGen/MachineFunction.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/PseudoSourceValue.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Target/TargetData.h"
28#include "llvm/Target/TargetFrameInfo.h"
29#include "llvm/Target/TargetLowering.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Target/TargetOptions.h"
32#include "llvm/ADT/SmallPtrSet.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/Support/Compiler.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/Debug.h"
37#include "llvm/Support/ErrorHandling.h"
38#include "llvm/Support/MathExtras.h"
39#include "llvm/Support/raw_ostream.h"
40#include <algorithm>
41#include <set>
42using namespace llvm;
43
44STATISTIC(NodesCombined   , "Number of dag nodes combined");
45STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
46STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
47STATISTIC(OpsNarrowed     , "Number of load/op/store narrowed");
48
49namespace {
50  static cl::opt<bool>
51    CombinerAA("combiner-alias-analysis", cl::Hidden,
52               cl::desc("Turn on alias analysis during testing"));
53
54  static cl::opt<bool>
55    CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
56               cl::desc("Include global information in alias analysis"));
57
58//------------------------------ DAGCombiner ---------------------------------//
59
60  class DAGCombiner {
61    SelectionDAG &DAG;
62    const TargetLowering &TLI;
63    CombineLevel Level;
64    CodeGenOpt::Level OptLevel;
65    bool LegalOperations;
66    bool LegalTypes;
67
68    // Worklist of all of the nodes that need to be simplified.
69    std::vector<SDNode*> WorkList;
70
71    // AA - Used for DAG load/store alias analysis.
72    AliasAnalysis &AA;
73
74    /// AddUsersToWorkList - When an instruction is simplified, add all users of
75    /// the instruction to the work lists because they might get more simplified
76    /// now.
77    ///
78    void AddUsersToWorkList(SDNode *N) {
79      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
80           UI != UE; ++UI)
81        AddToWorkList(*UI);
82    }
83
84    /// visit - call the node-specific routine that knows how to fold each
85    /// particular type of node.
86    SDValue visit(SDNode *N);
87
88  public:
89    /// AddToWorkList - Add to the work list making sure its instance is at
90    /// the back (next to be processed.)
91    void AddToWorkList(SDNode *N) {
92      removeFromWorkList(N);
93      WorkList.push_back(N);
94    }
95
96    /// removeFromWorkList - remove all instances of N from the worklist.
97    ///
98    void removeFromWorkList(SDNode *N) {
99      WorkList.erase(std::remove(WorkList.begin(), WorkList.end(), N),
100                     WorkList.end());
101    }
102
103    SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
104                      bool AddTo = true);
105
106    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
107      return CombineTo(N, &Res, 1, AddTo);
108    }
109
110    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
111                      bool AddTo = true) {
112      SDValue To[] = { Res0, Res1 };
113      return CombineTo(N, To, 2, AddTo);
114    }
115
116    void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);
117
118  private:
119
120    /// SimplifyDemandedBits - Check the specified integer node value to see if
121    /// it can be simplified or if things it uses can be simplified by bit
122    /// propagation.  If so, return true.
123    bool SimplifyDemandedBits(SDValue Op) {
124      APInt Demanded = APInt::getAllOnesValue(Op.getValueSizeInBits());
125      return SimplifyDemandedBits(Op, Demanded);
126    }
127
128    bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);
129
130    bool CombineToPreIndexedLoadStore(SDNode *N);
131    bool CombineToPostIndexedLoadStore(SDNode *N);
132
133
134    /// combine - call the node-specific routine that knows how to fold each
135    /// particular type of node. If that doesn't do anything, try the
136    /// target-specific DAG combines.
137    SDValue combine(SDNode *N);
138
139    // Visitation implementation - Implement dag node combining for different
140    // node types.  The semantics are as follows:
141    // Return Value:
142    //   SDValue.getNode() == 0 - No change was made
143    //   SDValue.getNode() == N - N was replaced, is dead and has been handled.
144    //   otherwise              - N should be replaced by the returned Operand.
145    //
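    // For example, visitADD's "fold (add x, 0) -> x" simply returns the add's
    // first operand, and the caller then replaces the ADD node with that value.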
146    SDValue visitTokenFactor(SDNode *N);
147    SDValue visitMERGE_VALUES(SDNode *N);
148    SDValue visitADD(SDNode *N);
149    SDValue visitSUB(SDNode *N);
150    SDValue visitADDC(SDNode *N);
151    SDValue visitADDE(SDNode *N);
152    SDValue visitMUL(SDNode *N);
153    SDValue visitSDIV(SDNode *N);
154    SDValue visitUDIV(SDNode *N);
155    SDValue visitSREM(SDNode *N);
156    SDValue visitUREM(SDNode *N);
157    SDValue visitMULHU(SDNode *N);
158    SDValue visitMULHS(SDNode *N);
159    SDValue visitSMUL_LOHI(SDNode *N);
160    SDValue visitUMUL_LOHI(SDNode *N);
161    SDValue visitSDIVREM(SDNode *N);
162    SDValue visitUDIVREM(SDNode *N);
163    SDValue visitAND(SDNode *N);
164    SDValue visitOR(SDNode *N);
165    SDValue visitXOR(SDNode *N);
166    SDValue SimplifyVBinOp(SDNode *N);
167    SDValue visitSHL(SDNode *N);
168    SDValue visitSRA(SDNode *N);
169    SDValue visitSRL(SDNode *N);
170    SDValue visitCTLZ(SDNode *N);
171    SDValue visitCTTZ(SDNode *N);
172    SDValue visitCTPOP(SDNode *N);
173    SDValue visitSELECT(SDNode *N);
174    SDValue visitSELECT_CC(SDNode *N);
175    SDValue visitSETCC(SDNode *N);
176    SDValue visitSIGN_EXTEND(SDNode *N);
177    SDValue visitZERO_EXTEND(SDNode *N);
178    SDValue visitANY_EXTEND(SDNode *N);
179    SDValue visitSIGN_EXTEND_INREG(SDNode *N);
180    SDValue visitTRUNCATE(SDNode *N);
181    SDValue visitBIT_CONVERT(SDNode *N);
182    SDValue visitBUILD_PAIR(SDNode *N);
183    SDValue visitFADD(SDNode *N);
184    SDValue visitFSUB(SDNode *N);
185    SDValue visitFMUL(SDNode *N);
186    SDValue visitFDIV(SDNode *N);
187    SDValue visitFREM(SDNode *N);
188    SDValue visitFCOPYSIGN(SDNode *N);
189    SDValue visitSINT_TO_FP(SDNode *N);
190    SDValue visitUINT_TO_FP(SDNode *N);
191    SDValue visitFP_TO_SINT(SDNode *N);
192    SDValue visitFP_TO_UINT(SDNode *N);
193    SDValue visitFP_ROUND(SDNode *N);
194    SDValue visitFP_ROUND_INREG(SDNode *N);
195    SDValue visitFP_EXTEND(SDNode *N);
196    SDValue visitFNEG(SDNode *N);
197    SDValue visitFABS(SDNode *N);
198    SDValue visitBRCOND(SDNode *N);
199    SDValue visitBR_CC(SDNode *N);
200    SDValue visitLOAD(SDNode *N);
201    SDValue visitSTORE(SDNode *N);
202    SDValue visitINSERT_VECTOR_ELT(SDNode *N);
203    SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
204    SDValue visitBUILD_VECTOR(SDNode *N);
205    SDValue visitCONCAT_VECTORS(SDNode *N);
206    SDValue visitVECTOR_SHUFFLE(SDNode *N);
207
208    SDValue XformToShuffleWithZero(SDNode *N);
209    SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);
210
211    SDValue visitShiftByConstant(SDNode *N, unsigned Amt);
212
213    bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
214    SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
215    SDValue SimplifySelect(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2);
216    SDValue SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2,
217                             SDValue N3, ISD::CondCode CC,
218                             bool NotExtCompare = false);
219    SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
220                          DebugLoc DL, bool foldBooleans = true);
221    SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
222                                         unsigned HiOp);
223    SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
224    SDValue ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, EVT);
225    SDValue BuildSDIV(SDNode *N);
226    SDValue BuildUDIV(SDNode *N);
227    SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
228    SDValue ReduceLoadWidth(SDNode *N);
229    SDValue ReduceLoadOpStoreWidth(SDNode *N);
230
231    SDValue GetDemandedBits(SDValue V, const APInt &Mask);
232
233    /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
234    /// looking for aliasing nodes and adding them to the Aliases vector.
235    void GatherAllAliases(SDNode *N, SDValue OriginalChain,
236                          SmallVector<SDValue, 8> &Aliases);
237
238    /// isAlias - Return true if there is any possibility that the two addresses
239    /// overlap.
240    bool isAlias(SDValue Ptr1, int64_t Size1,
241                 const Value *SrcValue1, int SrcValueOffset1,
242                 unsigned SrcValueAlign1,
243                 SDValue Ptr2, int64_t Size2,
244                 const Value *SrcValue2, int SrcValueOffset2,
245                 unsigned SrcValueAlign2) const;
246
247    /// FindAliasInfo - Extracts the relevant alias information from the memory
248    /// node.  Returns true if the operand was a load.
249    bool FindAliasInfo(SDNode *N,
250                       SDValue &Ptr, int64_t &Size,
251                       const Value *&SrcValue, int &SrcValueOffset,
252                       unsigned &SrcValueAlignment) const;
253
254    /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
255    /// looking for a better chain (aliasing node.)
256    SDValue FindBetterChain(SDNode *N, SDValue Chain);
257
258    /// getShiftAmountTy - Returns a type large enough to hold any valid
259    /// shift amount - before type legalization these can be huge.
260    EVT getShiftAmountTy() {
261      return LegalTypes ?  TLI.getShiftAmountTy() : TLI.getPointerTy();
262    }
263
264public:
265    DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
266      : DAG(D),
267        TLI(D.getTargetLoweringInfo()),
268        Level(Unrestricted),
269        OptLevel(OL),
270        LegalOperations(false),
271        LegalTypes(false),
272        AA(A) {}
273
274    /// Run - runs the dag combiner on all nodes in the work list
275    void Run(CombineLevel AtLevel);
276  };
277}
278
279
280namespace {
281/// WorkListRemover - This class is a DAGUpdateListener that removes any deleted
282/// nodes from the worklist.
283class WorkListRemover : public SelectionDAG::DAGUpdateListener {
284  DAGCombiner &DC;
285public:
286  explicit WorkListRemover(DAGCombiner &dc) : DC(dc) {}
287
288  virtual void NodeDeleted(SDNode *N, SDNode *E) {
289    DC.removeFromWorkList(N);
290  }
291
292  virtual void NodeUpdated(SDNode *N) {
293    // Ignore updates.
294  }
295};
296}
297
298//===----------------------------------------------------------------------===//
299//  TargetLowering::DAGCombinerInfo implementation
300//===----------------------------------------------------------------------===//
301
302void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
303  ((DAGCombiner*)DC)->AddToWorkList(N);
304}
305
306SDValue TargetLowering::DAGCombinerInfo::
307CombineTo(SDNode *N, const std::vector<SDValue> &To, bool AddTo) {
308  return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
309}
310
311SDValue TargetLowering::DAGCombinerInfo::
312CombineTo(SDNode *N, SDValue Res, bool AddTo) {
313  return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
314}
315
316
317SDValue TargetLowering::DAGCombinerInfo::
318CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
319  return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
320}
321
322void TargetLowering::DAGCombinerInfo::
323CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
324  return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
325}
326
327//===----------------------------------------------------------------------===//
328// Helper Functions
329//===----------------------------------------------------------------------===//
330
331/// isNegatibleForFree - Return 1 if we can compute the negated form of the
332/// specified expression for the same cost as the expression itself, or 2 if we
333/// can compute the negated form more cheaply than the expression itself.
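/// For example, (fneg X) itself is negatible for free (the fneg just goes
/// away, so this returns 2), and before legalization a ConstantFP can be
/// negated for the same cost (returns 1).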
334static char isNegatibleForFree(SDValue Op, bool LegalOperations,
335                               unsigned Depth = 0) {
336  // No compile time optimizations on this type.
337  if (Op.getValueType() == MVT::ppcf128)
338    return 0;
339
340  // fneg is removable even if it has multiple uses.
341  if (Op.getOpcode() == ISD::FNEG) return 2;
342
343  // Don't allow anything with multiple uses.
344  if (!Op.hasOneUse()) return 0;
345
346  // Don't recurse exponentially.
347  if (Depth > 6) return 0;
348
349  switch (Op.getOpcode()) {
350  default: return 0;
351  case ISD::ConstantFP:
352    // Don't invert constant FP values after legalize.  The negated constant
353    // isn't necessarily legal.
354    return LegalOperations ? 0 : 1;
355  case ISD::FADD:
356    // FIXME: determine better conditions for this xform.
357    if (!UnsafeFPMath) return 0;
358
359    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
360    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
361      return V;
362    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
363    return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1);
364  case ISD::FSUB:
365    // We can't turn -(A-B) into B-A when we honor signed zeros.
366    if (!UnsafeFPMath) return 0;
367
368    // fold (fneg (fsub A, B)) -> (fsub B, A)
369    return 1;
370
371  case ISD::FMUL:
372  case ISD::FDIV:
373    if (HonorSignDependentRoundingFPMath()) return 0;
374
375    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
376    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
377      return V;
378
379    return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1);
380
381  case ISD::FP_EXTEND:
382  case ISD::FP_ROUND:
383  case ISD::FSIN:
384    return isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1);
385  }
386}
387
388/// GetNegatedExpression - If isNegatibleForFree returns true, this function
389/// returns the newly negated expression.
390static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
391                                    bool LegalOperations, unsigned Depth = 0) {
392  // fneg is removable even if it has multiple uses.
393  if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);
394
395  // Don't allow anything with multiple uses.
396  assert(Op.hasOneUse() && "Unknown reuse!");
397
398  assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
399  switch (Op.getOpcode()) {
400  default: llvm_unreachable("Unknown code");
401  case ISD::ConstantFP: {
402    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
403    V.changeSign();
404    return DAG.getConstantFP(V, Op.getValueType());
405  }
406  case ISD::FADD:
407    // FIXME: determine better conditions for this xform.
408    assert(UnsafeFPMath);
409
410    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
411    if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
412      return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
413                         GetNegatedExpression(Op.getOperand(0), DAG,
414                                              LegalOperations, Depth+1),
415                         Op.getOperand(1));
416    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
417    return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
418                       GetNegatedExpression(Op.getOperand(1), DAG,
419                                            LegalOperations, Depth+1),
420                       Op.getOperand(0));
421  case ISD::FSUB:
422    // We can't turn -(A-B) into B-A when we honor signed zeros.
423    assert(UnsafeFPMath);
424
425    // fold (fneg (fsub 0, B)) -> B
426    if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
427      if (N0CFP->getValueAPF().isZero())
428        return Op.getOperand(1);
429
430    // fold (fneg (fsub A, B)) -> (fsub B, A)
431    return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
432                       Op.getOperand(1), Op.getOperand(0));
433
434  case ISD::FMUL:
435  case ISD::FDIV:
436    assert(!HonorSignDependentRoundingFPMath());
437
438    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
439    if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
440      return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
441                         GetNegatedExpression(Op.getOperand(0), DAG,
442                                              LegalOperations, Depth+1),
443                         Op.getOperand(1));
444
445    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
446    return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
447                       Op.getOperand(0),
448                       GetNegatedExpression(Op.getOperand(1), DAG,
449                                            LegalOperations, Depth+1));
450
451  case ISD::FP_EXTEND:
452  case ISD::FSIN:
453    return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
454                       GetNegatedExpression(Op.getOperand(0), DAG,
455                                            LegalOperations, Depth+1));
456  case ISD::FP_ROUND:
457      return DAG.getNode(ISD::FP_ROUND, Op.getDebugLoc(), Op.getValueType(),
458                         GetNegatedExpression(Op.getOperand(0), DAG,
459                                              LegalOperations, Depth+1),
460                         Op.getOperand(1));
461  }
462}
463
464
465// isSetCCEquivalent - Return true if this node is a setcc, or is a select_cc
466// that selects between the values 1 and 0, making it equivalent to a setcc.
467// Also, set the incoming LHS, RHS, and CC references to the appropriate
468// nodes based on the type of node we are checking.  This simplifies life a
469// bit for the callers.
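// For example, (select_cc LHS, RHS, 1, 0, cc) selects between 1 and 0, so it
// is handled here exactly like (setcc LHS, RHS, cc).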
470static bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
471                              SDValue &CC) {
472  if (N.getOpcode() == ISD::SETCC) {
473    LHS = N.getOperand(0);
474    RHS = N.getOperand(1);
475    CC  = N.getOperand(2);
476    return true;
477  }
478  if (N.getOpcode() == ISD::SELECT_CC &&
479      N.getOperand(2).getOpcode() == ISD::Constant &&
480      N.getOperand(3).getOpcode() == ISD::Constant &&
481      cast<ConstantSDNode>(N.getOperand(2))->getAPIntValue() == 1 &&
482      cast<ConstantSDNode>(N.getOperand(3))->isNullValue()) {
483    LHS = N.getOperand(0);
484    RHS = N.getOperand(1);
485    CC  = N.getOperand(4);
486    return true;
487  }
488  return false;
489}
490
491// isOneUseSetCC - Return true if this is a SetCC-equivalent operation with only
492// one use.  If this is true, it allows the users to invert the operation for
493// free when it is profitable to do so.
494static bool isOneUseSetCC(SDValue N) {
495  SDValue N0, N1, N2;
496  if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
497    return true;
498  return false;
499}
500
501SDValue DAGCombiner::ReassociateOps(unsigned Opc, DebugLoc DL,
502                                    SDValue N0, SDValue N1) {
503  EVT VT = N0.getValueType();
504  if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) {
505    if (isa<ConstantSDNode>(N1)) {
506      // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
507      SDValue OpNode =
508        DAG.FoldConstantArithmetic(Opc, VT,
509                                   cast<ConstantSDNode>(N0.getOperand(1)),
510                                   cast<ConstantSDNode>(N1));
511      return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
512    } else if (N0.hasOneUse()) {
513      // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use
514      SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
515                                   N0.getOperand(0), N1);
516      AddToWorkList(OpNode.getNode());
517      return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
518    }
519  }
520
521  if (N1.getOpcode() == Opc && isa<ConstantSDNode>(N1.getOperand(1))) {
522    if (isa<ConstantSDNode>(N0)) {
523      // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
524      SDValue OpNode =
525        DAG.FoldConstantArithmetic(Opc, VT,
526                                   cast<ConstantSDNode>(N1.getOperand(1)),
527                                   cast<ConstantSDNode>(N0));
528      return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
529    } else if (N1.hasOneUse()) {
530      // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one use
531      SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
532                                   N1.getOperand(0), N0);
533      AddToWorkList(OpNode.getNode());
534      return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
535    }
536  }
537
538  return SDValue();
539}
540
541SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
542                               bool AddTo) {
543  assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
544  ++NodesCombined;
545  DEBUG(errs() << "\nReplacing.1 ";
546        N->dump(&DAG);
547        errs() << "\nWith: ";
548        To[0].getNode()->dump(&DAG);
549        errs() << " and " << NumTo-1 << " other values\n";
550        for (unsigned i = 0, e = NumTo; i != e; ++i)
551          assert(N->getValueType(i) == To[i].getValueType() &&
552                 "Cannot combine value to value of different type!"));
553  WorkListRemover DeadNodes(*this);
554  DAG.ReplaceAllUsesWith(N, To, &DeadNodes);
555
556  if (AddTo) {
557    // Push the new nodes and any users onto the worklist
558    for (unsigned i = 0, e = NumTo; i != e; ++i) {
559      if (To[i].getNode()) {
560        AddToWorkList(To[i].getNode());
561        AddUsersToWorkList(To[i].getNode());
562      }
563    }
564  }
565
566  // Finally, if the node is now dead, remove it from the graph.  The node
567  // may not be dead if the replacement process recursively simplified to
568  // something else needing this node.
569  if (N->use_empty()) {
570    // Nodes can be reintroduced into the worklist.  Make sure we do not
571    // process a node that has been replaced.
572    removeFromWorkList(N);
573
574    // Finally, since the node is now dead, remove it from the graph.
575    DAG.DeleteNode(N);
576  }
577  return SDValue(N, 0);
578}
579
580void
581DAGCombiner::CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &
582                                                                          TLO) {
583  // Replace all uses.  If any nodes become isomorphic to other nodes and
584  // are deleted, make sure to remove them from our worklist.
585  WorkListRemover DeadNodes(*this);
586  DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New, &DeadNodes);
587
588  // Push the new node and any (possibly new) users onto the worklist.
589  AddToWorkList(TLO.New.getNode());
590  AddUsersToWorkList(TLO.New.getNode());
591
592  // Finally, if the node is now dead, remove it from the graph.  The node
593  // may not be dead if the replacement process recursively simplified to
594  // something else needing this node.
595  if (TLO.Old.getNode()->use_empty()) {
596    removeFromWorkList(TLO.Old.getNode());
597
598    // If the operands of this node are only used by the node, they will now
599    // be dead.  Make sure to visit them first to delete dead nodes early.
600    for (unsigned i = 0, e = TLO.Old.getNode()->getNumOperands(); i != e; ++i)
601      if (TLO.Old.getNode()->getOperand(i).getNode()->hasOneUse())
602        AddToWorkList(TLO.Old.getNode()->getOperand(i).getNode());
603
604    DAG.DeleteNode(TLO.Old.getNode());
605  }
606}
607
608/// SimplifyDemandedBits - Check the specified integer node value to see if
609/// it can be simplified or if things it uses can be simplified by bit
610/// propagation.  If so, return true.
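/// For example, if every demanded bit of an AND's result is already covered by
/// the AND's constant mask, the AND contributes nothing to those bits and can
/// usually be bypassed entirely.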
611bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
612  TargetLowering::TargetLoweringOpt TLO(DAG);
613  APInt KnownZero, KnownOne;
614  if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
615    return false;
616
617  // Revisit the node.
618  AddToWorkList(Op.getNode());
619
620  // Replace the old value with the new one.
621  ++NodesCombined;
622  DEBUG(errs() << "\nReplacing.2 ";
623        TLO.Old.getNode()->dump(&DAG);
624        errs() << "\nWith: ";
625        TLO.New.getNode()->dump(&DAG);
626        errs() << '\n');
627
628  CommitTargetLoweringOpt(TLO);
629  return true;
630}
631
632//===----------------------------------------------------------------------===//
633//  Main DAG Combiner implementation
634//===----------------------------------------------------------------------===//
635
636void DAGCombiner::Run(CombineLevel AtLevel) {
637  // Set the instance variables, so that the various visit routines may use them.
638  Level = AtLevel;
639  LegalOperations = Level >= NoIllegalOperations;
640  LegalTypes = Level >= NoIllegalTypes;
641
642  // Add all the dag nodes to the worklist.
643  WorkList.reserve(DAG.allnodes_size());
644  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
645       E = DAG.allnodes_end(); I != E; ++I)
646    WorkList.push_back(I);
647
648  // Create a dummy node (which is not added to allnodes), that adds a reference
649  // to the root node, preventing it from being deleted, and tracking any
650  // changes of the root.
651  HandleSDNode Dummy(DAG.getRoot());
652
653  // The root of the dag may dangle to deleted nodes until the dag combiner is
654  // done.  Set it to null to avoid confusion.
655  DAG.setRoot(SDValue());
656
657  // While the worklist isn't empty, inspect the node at the end of it and
658  // try to combine it.
659  while (!WorkList.empty()) {
660    SDNode *N = WorkList.back();
661    WorkList.pop_back();
662
663    // If N has no uses, it is dead.  Make sure to revisit all N's operands once
664    // N is deleted from the DAG, since they too may now be dead or may have a
665    // reduced number of uses, allowing other xforms.
666    if (N->use_empty() && N != &Dummy) {
667      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
668        AddToWorkList(N->getOperand(i).getNode());
669
670      DAG.DeleteNode(N);
671      continue;
672    }
673
674    SDValue RV = combine(N);
675
676    if (RV.getNode() == 0)
677      continue;
678
679    ++NodesCombined;
680
681    // If we get back the same node we passed in, rather than a new node or
682    // zero, we know that the node must have defined multiple values and
683    // CombineTo was used.  Since CombineTo takes care of the worklist
684    // mechanics for us, we have no work to do in this case.
685    if (RV.getNode() == N)
686      continue;
687
688    assert(N->getOpcode() != ISD::DELETED_NODE &&
689           RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
690           "Node was deleted but visit returned new node!");
691
692    DEBUG(errs() << "\nReplacing.3 ";
693          N->dump(&DAG);
694          errs() << "\nWith: ";
695          RV.getNode()->dump(&DAG);
696          errs() << '\n');
697    WorkListRemover DeadNodes(*this);
698    if (N->getNumValues() == RV.getNode()->getNumValues())
699      DAG.ReplaceAllUsesWith(N, RV.getNode(), &DeadNodes);
700    else {
701      assert(N->getValueType(0) == RV.getValueType() &&
702             N->getNumValues() == 1 && "Type mismatch");
703      SDValue OpV = RV;
704      DAG.ReplaceAllUsesWith(N, &OpV, &DeadNodes);
705    }
706
707    // Push the new node and any users onto the worklist
708    AddToWorkList(RV.getNode());
709    AddUsersToWorkList(RV.getNode());
710
711    // Add any uses of the old node to the worklist in case this node is the
712    // last one that uses them.  They may become dead after this node is
713    // deleted.
714    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
715      AddToWorkList(N->getOperand(i).getNode());
716
717    // Finally, if the node is now dead, remove it from the graph.  The node
718    // may not be dead if the replacement process recursively simplified to
719    // something else needing this node.
720    if (N->use_empty()) {
721      // Nodes can be reintroduced into the worklist.  Make sure we do not
722      // process a node that has been replaced.
723      removeFromWorkList(N);
724
725      // Finally, since the node is now dead, remove it from the graph.
726      DAG.DeleteNode(N);
727    }
728  }
729
730  // If the root changed (e.g. it was a dead load), update the root.
731  DAG.setRoot(Dummy.getValue());
732}
733
734SDValue DAGCombiner::visit(SDNode *N) {
735  switch(N->getOpcode()) {
736  default: break;
737  case ISD::TokenFactor:        return visitTokenFactor(N);
738  case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
739  case ISD::ADD:                return visitADD(N);
740  case ISD::SUB:                return visitSUB(N);
741  case ISD::ADDC:               return visitADDC(N);
742  case ISD::ADDE:               return visitADDE(N);
743  case ISD::MUL:                return visitMUL(N);
744  case ISD::SDIV:               return visitSDIV(N);
745  case ISD::UDIV:               return visitUDIV(N);
746  case ISD::SREM:               return visitSREM(N);
747  case ISD::UREM:               return visitUREM(N);
748  case ISD::MULHU:              return visitMULHU(N);
749  case ISD::MULHS:              return visitMULHS(N);
750  case ISD::SMUL_LOHI:          return visitSMUL_LOHI(N);
751  case ISD::UMUL_LOHI:          return visitUMUL_LOHI(N);
752  case ISD::SDIVREM:            return visitSDIVREM(N);
753  case ISD::UDIVREM:            return visitUDIVREM(N);
754  case ISD::AND:                return visitAND(N);
755  case ISD::OR:                 return visitOR(N);
756  case ISD::XOR:                return visitXOR(N);
757  case ISD::SHL:                return visitSHL(N);
758  case ISD::SRA:                return visitSRA(N);
759  case ISD::SRL:                return visitSRL(N);
760  case ISD::CTLZ:               return visitCTLZ(N);
761  case ISD::CTTZ:               return visitCTTZ(N);
762  case ISD::CTPOP:              return visitCTPOP(N);
763  case ISD::SELECT:             return visitSELECT(N);
764  case ISD::SELECT_CC:          return visitSELECT_CC(N);
765  case ISD::SETCC:              return visitSETCC(N);
766  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
767  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
768  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
769  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
770  case ISD::TRUNCATE:           return visitTRUNCATE(N);
771  case ISD::BIT_CONVERT:        return visitBIT_CONVERT(N);
772  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
773  case ISD::FADD:               return visitFADD(N);
774  case ISD::FSUB:               return visitFSUB(N);
775  case ISD::FMUL:               return visitFMUL(N);
776  case ISD::FDIV:               return visitFDIV(N);
777  case ISD::FREM:               return visitFREM(N);
778  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
779  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
780  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
781  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
782  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
783  case ISD::FP_ROUND:           return visitFP_ROUND(N);
784  case ISD::FP_ROUND_INREG:     return visitFP_ROUND_INREG(N);
785  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
786  case ISD::FNEG:               return visitFNEG(N);
787  case ISD::FABS:               return visitFABS(N);
788  case ISD::BRCOND:             return visitBRCOND(N);
789  case ISD::BR_CC:              return visitBR_CC(N);
790  case ISD::LOAD:               return visitLOAD(N);
791  case ISD::STORE:              return visitSTORE(N);
792  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
793  case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
794  case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
795  case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
796  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
797  }
798  return SDValue();
799}
800
801SDValue DAGCombiner::combine(SDNode *N) {
802  SDValue RV = visit(N);
803
804  // If nothing happened, try a target-specific DAG combine.
805  if (RV.getNode() == 0) {
806    assert(N->getOpcode() != ISD::DELETED_NODE &&
807           "Node was deleted but visit returned NULL!");
808
809    if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
810        TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {
811
812      // Expose the DAG combiner to the target combiner impls.
813      TargetLowering::DAGCombinerInfo
814        DagCombineInfo(DAG, !LegalTypes, !LegalOperations, false, this);
815
816      RV = TLI.PerformDAGCombine(N, DagCombineInfo);
817    }
818  }
819
820  // If N is a commutative binary node, try commuting it to enable more
821  // sdisel CSE.
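  // For example, if the DAG already contains (add Y, X), an (add X, Y) node
  // here is replaced with that existing node so the two collapse into one.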
822  if (RV.getNode() == 0 &&
823      SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
824      N->getNumValues() == 1) {
825    SDValue N0 = N->getOperand(0);
826    SDValue N1 = N->getOperand(1);
827
828    // Constant operands are canonicalized to RHS.
829    if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
830      SDValue Ops[] = { N1, N0 };
831      SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(),
832                                            Ops, 2);
833      if (CSENode)
834        return SDValue(CSENode, 0);
835    }
836  }
837
838  return RV;
839}
840
841/// getInputChainForNode - Given a node, return its input chain if it has one,
842/// otherwise return a null sd operand.
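/// Chain operands have type MVT::Other; for instance, a load's operand 0 is
/// its input chain.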
843static SDValue getInputChainForNode(SDNode *N) {
844  if (unsigned NumOps = N->getNumOperands()) {
845    if (N->getOperand(0).getValueType() == MVT::Other)
846      return N->getOperand(0);
847    else if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
848      return N->getOperand(NumOps-1);
849    for (unsigned i = 1; i < NumOps-1; ++i)
850      if (N->getOperand(i).getValueType() == MVT::Other)
851        return N->getOperand(i);
852  }
853  return SDValue();
854}
855
856SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
857  // If N has two operands, where one has an input chain equal to the other,
858  // the 'other' chain is redundant.
859  if (N->getNumOperands() == 2) {
860    if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
861      return N->getOperand(0);
862    if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
863      return N->getOperand(1);
864  }
865
866  SmallVector<SDNode *, 8> TFs;     // List of token factors to visit.
867  SmallVector<SDValue, 8> Ops;    // Ops for replacing token factor.
868  SmallPtrSet<SDNode*, 16> SeenOps;
869  bool Changed = false;             // If we should replace this token factor.
870
871  // Start out with this token factor.
872  TFs.push_back(N);
873
874  // Iterate through token factors.  The TFs list grows as new token factors
875  // are encountered.
876  for (unsigned i = 0; i < TFs.size(); ++i) {
877    SDNode *TF = TFs[i];
878
879    // Check each of the operands.
880    for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) {
881      SDValue Op = TF->getOperand(i);
882
883      switch (Op.getOpcode()) {
884      case ISD::EntryToken:
885        // Entry tokens don't need to be added to the list. They are
886        // redundant.
887        Changed = true;
888        break;
889
890      case ISD::TokenFactor:
891        if (Op.hasOneUse() &&
892            std::find(TFs.begin(), TFs.end(), Op.getNode()) == TFs.end()) {
893          // Queue up for processing.
894          TFs.push_back(Op.getNode());
895          // Clean up in case the token factor is removed.
896          AddToWorkList(Op.getNode());
897          Changed = true;
898          break;
899        }
900        // Fall thru
901
902      default:
903        // Only add if it isn't already in the list.
904        if (SeenOps.insert(Op.getNode()))
905          Ops.push_back(Op);
906        else
907          Changed = true;
908        break;
909      }
910    }
911  }
912
913  SDValue Result;
914
915  // If we've changed things around, replace the token factor.
916  if (Changed) {
917    if (Ops.empty()) {
918      // The entry token is the only possible outcome.
919      Result = DAG.getEntryNode();
920    } else {
921      // New and improved token factor.
922      Result = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
923                           MVT::Other, &Ops[0], Ops.size());
924    }
925
926    // Don't add users to work list.
927    return CombineTo(N, Result, false);
928  }
929
930  return Result;
931}
932
933/// MERGE_VALUES can always be eliminated.
934SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
935  WorkListRemover DeadNodes(*this);
936  // Replacing results may cause a different MERGE_VALUES to suddenly
937  // be CSE'd with N, and carry its uses with it. Iterate until no
938  // uses remain, to ensure that the node can be safely deleted.
939  do {
940    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
941      DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i),
942                                    &DeadNodes);
943  } while (!N->use_empty());
944  removeFromWorkList(N);
945  DAG.DeleteNode(N);
946  return SDValue(N, 0);   // Return N so it doesn't get rechecked!
947}
948
949static
950SDValue combineShlAddConstant(DebugLoc DL, SDValue N0, SDValue N1,
951                              SelectionDAG &DAG) {
952  EVT VT = N0.getValueType();
953  SDValue N00 = N0.getOperand(0);
954  SDValue N01 = N0.getOperand(1);
955  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01);
956
957  if (N01C && N00.getOpcode() == ISD::ADD && N00.getNode()->hasOneUse() &&
958      isa<ConstantSDNode>(N00.getOperand(1))) {
959    // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), )
960    N0 = DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT,
961                     DAG.getNode(ISD::SHL, N00.getDebugLoc(), VT,
962                                 N00.getOperand(0), N01),
963                     DAG.getNode(ISD::SHL, N01.getDebugLoc(), VT,
964                                 N00.getOperand(1), N01));
965    return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
966  }
967
968  return SDValue();
969}
970
971SDValue DAGCombiner::visitADD(SDNode *N) {
972  SDValue N0 = N->getOperand(0);
973  SDValue N1 = N->getOperand(1);
974  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
975  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
976  EVT VT = N0.getValueType();
977
978  // fold vector ops
979  if (VT.isVector()) {
980    SDValue FoldedVOp = SimplifyVBinOp(N);
981    if (FoldedVOp.getNode()) return FoldedVOp;
982  }
983
984  // fold (add x, undef) -> undef
985  if (N0.getOpcode() == ISD::UNDEF)
986    return N0;
987  if (N1.getOpcode() == ISD::UNDEF)
988    return N1;
989  // fold (add c1, c2) -> c1+c2
990  if (N0C && N1C)
991    return DAG.FoldConstantArithmetic(ISD::ADD, VT, N0C, N1C);
992  // canonicalize constant to RHS
993  if (N0C && !N1C)
994    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0);
995  // fold (add x, 0) -> x
996  if (N1C && N1C->isNullValue())
997    return N0;
998  // fold (add Sym, c) -> Sym+c
999  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
1000    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C &&
1001        GA->getOpcode() == ISD::GlobalAddress)
1002      return DAG.getGlobalAddress(GA->getGlobal(), VT,
1003                                  GA->getOffset() +
1004                                    (uint64_t)N1C->getSExtValue());
1005  // fold ((c1-A)+c2) -> (c1+c2)-A
1006  if (N1C && N0.getOpcode() == ISD::SUB)
1007    if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0)))
1008      return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1009                         DAG.getConstant(N1C->getAPIntValue()+
1010                                         N0C->getAPIntValue(), VT),
1011                         N0.getOperand(1));
1012  // reassociate add
1013  SDValue RADD = ReassociateOps(ISD::ADD, N->getDebugLoc(), N0, N1);
1014  if (RADD.getNode() != 0)
1015    return RADD;
1016  // fold ((0-A) + B) -> B-A
1017  if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
1018      cast<ConstantSDNode>(N0.getOperand(0))->isNullValue())
1019    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1, N0.getOperand(1));
1020  // fold (A + (0-B)) -> A-B
1021  if (N1.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N1.getOperand(0)) &&
1022      cast<ConstantSDNode>(N1.getOperand(0))->isNullValue())
1023    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, N1.getOperand(1));
1024  // fold (A+(B-A)) -> B
1025  if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
1026    return N1.getOperand(0);
1027  // fold ((B-A)+A) -> B
1028  if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
1029    return N0.getOperand(0);
1030  // fold (A+(B-(A+C))) to (B-C)
1031  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1032      N0 == N1.getOperand(1).getOperand(0))
1033    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0),
1034                       N1.getOperand(1).getOperand(1));
1035  // fold (A+(B-(C+A))) to (B-C)
1036  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1037      N0 == N1.getOperand(1).getOperand(1))
1038    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0),
1039                       N1.getOperand(1).getOperand(0));
1040  // fold (A+((B-A)+or-C)) to (B+or-C)
1041  if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
1042      N1.getOperand(0).getOpcode() == ISD::SUB &&
1043      N0 == N1.getOperand(0).getOperand(1))
1044    return DAG.getNode(N1.getOpcode(), N->getDebugLoc(), VT,
1045                       N1.getOperand(0).getOperand(0), N1.getOperand(1));
1046
1047  // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
1048  if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
1049    SDValue N00 = N0.getOperand(0);
1050    SDValue N01 = N0.getOperand(1);
1051    SDValue N10 = N1.getOperand(0);
1052    SDValue N11 = N1.getOperand(1);
1053
1054    if (isa<ConstantSDNode>(N00) || isa<ConstantSDNode>(N10))
1055      return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1056                         DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT, N00, N10),
1057                         DAG.getNode(ISD::ADD, N1.getDebugLoc(), VT, N01, N11));
1058  }
1059
1060  if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0)))
1061    return SDValue(N, 0);
1062
1063  // fold (a+b) -> (a|b) iff a and b share no bits.
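  // For example, (add (and x, 0xF0), (and y, 0x0F)) can become an OR, since
  // the known-zero bits of the two operands show they never carry into each
  // other.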
1064  if (VT.isInteger() && !VT.isVector()) {
1065    APInt LHSZero, LHSOne;
1066    APInt RHSZero, RHSOne;
1067    APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits());
1068    DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
1069
1070    if (LHSZero.getBoolValue()) {
1071      DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
1072
1073      // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
1074      // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
1075      if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
1076          (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
1077        return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1);
1078    }
1079  }
1080
1081  // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), )
1082  if (N0.getOpcode() == ISD::SHL && N0.getNode()->hasOneUse()) {
1083    SDValue Result = combineShlAddConstant(N->getDebugLoc(), N0, N1, DAG);
1084    if (Result.getNode()) return Result;
1085  }
1086  if (N1.getOpcode() == ISD::SHL && N1.getNode()->hasOneUse()) {
1087    SDValue Result = combineShlAddConstant(N->getDebugLoc(), N1, N0, DAG);
1088    if (Result.getNode()) return Result;
1089  }
1090
1091  return SDValue();
1092}
1093
1094SDValue DAGCombiner::visitADDC(SDNode *N) {
1095  SDValue N0 = N->getOperand(0);
1096  SDValue N1 = N->getOperand(1);
1097  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1098  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1099  EVT VT = N0.getValueType();
1100
1101  // If the flag result is dead, turn this into an ADD.
1102  if (N->hasNUsesOfValue(0, 1))
1103    return CombineTo(N, DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0),
1104                     DAG.getNode(ISD::CARRY_FALSE,
1105                                 N->getDebugLoc(), MVT::Flag));
1106
1107  // canonicalize constant to RHS.
1108  if (N0C && !N1C)
1109    return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N1, N0);
1110
1111  // fold (addc x, 0) -> x + no carry out
1112  if (N1C && N1C->isNullValue())
1113    return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
1114                                        N->getDebugLoc(), MVT::Flag));
1115
1116  // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
1117  APInt LHSZero, LHSOne;
1118  APInt RHSZero, RHSOne;
1119  APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits());
1120  DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
1121
1122  if (LHSZero.getBoolValue()) {
1123    DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
1124
1125    // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
1126    // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
1127    if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
1128        (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
1129      return CombineTo(N, DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1),
1130                       DAG.getNode(ISD::CARRY_FALSE,
1131                                   N->getDebugLoc(), MVT::Flag));
1132  }
1133
1134  return SDValue();
1135}
1136
1137SDValue DAGCombiner::visitADDE(SDNode *N) {
1138  SDValue N0 = N->getOperand(0);
1139  SDValue N1 = N->getOperand(1);
1140  SDValue CarryIn = N->getOperand(2);
1141  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1142  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1143
1144  // canonicalize constant to RHS
1145  if (N0C && !N1C)
1146    return DAG.getNode(ISD::ADDE, N->getDebugLoc(), N->getVTList(),
1147                       N1, N0, CarryIn);
1148
1149  // fold (adde x, y, false) -> (addc x, y)
1150  if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
1151    return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N1, N0);
1152
1153  return SDValue();
1154}
1155
1156SDValue DAGCombiner::visitSUB(SDNode *N) {
1157  SDValue N0 = N->getOperand(0);
1158  SDValue N1 = N->getOperand(1);
1159  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1160  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1161  EVT VT = N0.getValueType();
1162
1163  // fold vector ops
1164  if (VT.isVector()) {
1165    SDValue FoldedVOp = SimplifyVBinOp(N);
1166    if (FoldedVOp.getNode()) return FoldedVOp;
1167  }
1168
1169  // fold (sub x, x) -> 0
1170  if (N0 == N1)
1171    return DAG.getConstant(0, N->getValueType(0));
1172  // fold (sub c1, c2) -> c1-c2
1173  if (N0C && N1C)
1174    return DAG.FoldConstantArithmetic(ISD::SUB, VT, N0C, N1C);
1175  // fold (sub x, c) -> (add x, -c)
1176  if (N1C)
1177    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0,
1178                       DAG.getConstant(-N1C->getAPIntValue(), VT));
1179  // fold (A+B)-A -> B
1180  if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
1181    return N0.getOperand(1);
1182  // fold (A+B)-B -> A
1183  if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
1184    return N0.getOperand(0);
1185  // fold ((A+(B+or-C))-B) -> A+or-C
1186  if (N0.getOpcode() == ISD::ADD &&
1187      (N0.getOperand(1).getOpcode() == ISD::SUB ||
1188       N0.getOperand(1).getOpcode() == ISD::ADD) &&
1189      N0.getOperand(1).getOperand(0) == N1)
1190    return DAG.getNode(N0.getOperand(1).getOpcode(), N->getDebugLoc(), VT,
1191                       N0.getOperand(0), N0.getOperand(1).getOperand(1));
1192  // fold ((A+(C+B))-B) -> A+C
1193  if (N0.getOpcode() == ISD::ADD &&
1194      N0.getOperand(1).getOpcode() == ISD::ADD &&
1195      N0.getOperand(1).getOperand(1) == N1)
1196    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT,
1197                       N0.getOperand(0), N0.getOperand(1).getOperand(0));
1198  // fold ((A-(B-C))-C) -> A-B
1199  if (N0.getOpcode() == ISD::SUB &&
1200      N0.getOperand(1).getOpcode() == ISD::SUB &&
1201      N0.getOperand(1).getOperand(1) == N1)
1202    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1203                       N0.getOperand(0), N0.getOperand(1).getOperand(0));
1204
1205  // If either operand of a sub is undef, the result is undef
1206  if (N0.getOpcode() == ISD::UNDEF)
1207    return N0;
1208  if (N1.getOpcode() == ISD::UNDEF)
1209    return N1;
1210
1211  // If the relocation model supports it, consider symbol offsets.
1212  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
1213    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
1214      // fold (sub Sym, c) -> Sym-c
1215      if (N1C && GA->getOpcode() == ISD::GlobalAddress)
1216        return DAG.getGlobalAddress(GA->getGlobal(), VT,
1217                                    GA->getOffset() -
1218                                      (uint64_t)N1C->getSExtValue());
1219      // fold (sub Sym+c1, Sym+c2) -> c1-c2
1220      if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
1221        if (GA->getGlobal() == GB->getGlobal())
1222          return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
1223                                 VT);
1224    }
1225
1226  return SDValue();
1227}
1228
1229SDValue DAGCombiner::visitMUL(SDNode *N) {
1230  SDValue N0 = N->getOperand(0);
1231  SDValue N1 = N->getOperand(1);
1232  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1233  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1234  EVT VT = N0.getValueType();
1235
1236  // fold vector ops
1237  if (VT.isVector()) {
1238    SDValue FoldedVOp = SimplifyVBinOp(N);
1239    if (FoldedVOp.getNode()) return FoldedVOp;
1240  }
1241
1242  // fold (mul x, undef) -> 0
1243  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1244    return DAG.getConstant(0, VT);
1245  // fold (mul c1, c2) -> c1*c2
1246  if (N0C && N1C)
1247    return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0C, N1C);
1248  // canonicalize constant to RHS
1249  if (N0C && !N1C)
1250    return DAG.getNode(ISD::MUL, N->getDebugLoc(), VT, N1, N0);
1251  // fold (mul x, 0) -> 0
1252  if (N1C && N1C->isNullValue())
1253    return N1;
1254  // fold (mul x, -1) -> 0-x
1255  if (N1C && N1C->isAllOnesValue())
1256    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1257                       DAG.getConstant(0, VT), N0);
1258  // fold (mul x, (1 << c)) -> x << c
1259  if (N1C && N1C->getAPIntValue().isPowerOf2())
1260    return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
1261                       DAG.getConstant(N1C->getAPIntValue().logBase2(),
1262                                       getShiftAmountTy()));
1263  // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
1264  if (N1C && (-N1C->getAPIntValue()).isPowerOf2()) {
1265    unsigned Log2Val = (-N1C->getAPIntValue()).logBase2();
1266    // FIXME: If the input is something that is easily negated (e.g. a
1267    // single-use add), we should put the negate there.
1268    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1269                       DAG.getConstant(0, VT),
1270                       DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
1271                            DAG.getConstant(Log2Val, getShiftAmountTy())));
1272  }
1273  // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
1274  if (N1C && N0.getOpcode() == ISD::SHL &&
1275      isa<ConstantSDNode>(N0.getOperand(1))) {
1276    SDValue C3 = DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
1277                             N1, N0.getOperand(1));
1278    AddToWorkList(C3.getNode());
1279    return DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1280                       N0.getOperand(0), C3);
1281  }
1282
1283  // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
1284  // use.
1285  {
1286    SDValue Sh(0,0), Y(0,0);
1287    // Check for both (mul (shl X, C), Y)  and  (mul Y, (shl X, C)).
1288    if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
1289        N0.getNode()->hasOneUse()) {
1290      Sh = N0; Y = N1;
1291    } else if (N1.getOpcode() == ISD::SHL &&
1292               isa<ConstantSDNode>(N1.getOperand(1)) &&
1293               N1.getNode()->hasOneUse()) {
1294      Sh = N1; Y = N0;
1295    }
1296
1297    if (Sh.getNode()) {
1298      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1299                                Sh.getOperand(0), Y);
1300      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
1301                         Mul, Sh.getOperand(1));
1302    }
1303  }
1304
1305  // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
1306  if (N1C && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
1307      isa<ConstantSDNode>(N0.getOperand(1)))
1308    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT,
1309                       DAG.getNode(ISD::MUL, N0.getDebugLoc(), VT,
1310                                   N0.getOperand(0), N1),
1311                       DAG.getNode(ISD::MUL, N1.getDebugLoc(), VT,
1312                                   N0.getOperand(1), N1));
1313
1314  // reassociate mul
1315  SDValue RMUL = ReassociateOps(ISD::MUL, N->getDebugLoc(), N0, N1);
1316  if (RMUL.getNode() != 0)
1317    return RMUL;
1318
1319  return SDValue();
1320}
1321
1322SDValue DAGCombiner::visitSDIV(SDNode *N) {
1323  SDValue N0 = N->getOperand(0);
1324  SDValue N1 = N->getOperand(1);
1325  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1326  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1327  EVT VT = N->getValueType(0);
1328
1329  // fold vector ops
1330  if (VT.isVector()) {
1331    SDValue FoldedVOp = SimplifyVBinOp(N);
1332    if (FoldedVOp.getNode()) return FoldedVOp;
1333  }
1334
1335  // fold (sdiv c1, c2) -> c1/c2
1336  if (N0C && N1C && !N1C->isNullValue())
1337    return DAG.FoldConstantArithmetic(ISD::SDIV, VT, N0C, N1C);
1338  // fold (sdiv X, 1) -> X
1339  if (N1C && N1C->getSExtValue() == 1LL)
1340    return N0;
1341  // fold (sdiv X, -1) -> 0-X
1342  if (N1C && N1C->isAllOnesValue())
1343    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1344                       DAG.getConstant(0, VT), N0);
1345  // If we know the sign bits of both operands are zero, strength reduce to a
1346  // udiv instead.  Handles (X&15) /s 4 -> X&15 >> 2
1347  if (!VT.isVector()) {
1348    if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
1349      return DAG.getNode(ISD::UDIV, N->getDebugLoc(), N1.getValueType(),
1350                         N0, N1);
1351  }
1352  // fold (sdiv X, pow2) -> simple ops after legalize
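  // For example, on an i32 value, (sdiv X, 4) becomes roughly:
  //   SGN = X >>s 31;  SRL = SGN >>u 30;  ADD = X + SRL;  result = ADD >>s 2
  // i.e. add abs2-1 (here 3) when X is negative so the arithmetic shift
  // rounds toward zero.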
1353  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap() &&
1354      (isPowerOf2_64(N1C->getSExtValue()) ||
1355       isPowerOf2_64(-N1C->getSExtValue()))) {
1356    // If dividing by powers of two is cheap, then don't perform the following
1357    // fold.
1358    if (TLI.isPow2DivCheap())
1359      return SDValue();
1360
1361    int64_t pow2 = N1C->getSExtValue();
1362    int64_t abs2 = pow2 > 0 ? pow2 : -pow2;
1363    unsigned lg2 = Log2_64(abs2);
1364
1365    // Splat the sign bit into the register
1366    SDValue SGN = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
1367                              DAG.getConstant(VT.getSizeInBits()-1,
1368                                              getShiftAmountTy()));
1369    AddToWorkList(SGN.getNode());
1370
1371    // Add (N0 < 0) ? abs2 - 1 : 0;
1372    SDValue SRL = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, SGN,
1373                              DAG.getConstant(VT.getSizeInBits() - lg2,
1374                                              getShiftAmountTy()));
1375    SDValue ADD = DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, SRL);
1376    AddToWorkList(SRL.getNode());
1377    AddToWorkList(ADD.getNode());    // Divide by pow2
1378    SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, ADD,
1379                              DAG.getConstant(lg2, getShiftAmountTy()));
1380
1381    // If we're dividing by a positive value, we're done.  Otherwise, we must
1382    // negate the result.
1383    if (pow2 > 0)
1384      return SRA;
1385
1386    AddToWorkList(SRA.getNode());
1387    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1388                       DAG.getConstant(0, VT), SRA);
1389  }
1390
1391  // if integer divide is expensive and we satisfy the requirements, emit an
1392  // alternate sequence.
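  // (BuildSDIV is expected to expand the divide into a multiply by a "magic"
  // constant plus fix-up shifts when the target supports those operations.)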
1393  if (N1C && (N1C->getSExtValue() < -1 || N1C->getSExtValue() > 1) &&
1394      !TLI.isIntDivCheap()) {
1395    SDValue Op = BuildSDIV(N);
1396    if (Op.getNode()) return Op;
1397  }
1398
1399  // undef / X -> 0
1400  if (N0.getOpcode() == ISD::UNDEF)
1401    return DAG.getConstant(0, VT);
1402  // X / undef -> undef
1403  if (N1.getOpcode() == ISD::UNDEF)
1404    return N1;
1405
1406  return SDValue();
1407}
1408
1409SDValue DAGCombiner::visitUDIV(SDNode *N) {
1410  SDValue N0 = N->getOperand(0);
1411  SDValue N1 = N->getOperand(1);
1412  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1413  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1414  EVT VT = N->getValueType(0);
1415
1416  // fold vector ops
1417  if (VT.isVector()) {
1418    SDValue FoldedVOp = SimplifyVBinOp(N);
1419    if (FoldedVOp.getNode()) return FoldedVOp;
1420  }
1421
1422  // fold (udiv c1, c2) -> c1/c2
1423  if (N0C && N1C && !N1C->isNullValue())
1424    return DAG.FoldConstantArithmetic(ISD::UDIV, VT, N0C, N1C);
1425  // fold (udiv x, (1 << c)) -> x >>u c
1426  if (N1C && N1C->getAPIntValue().isPowerOf2())
1427    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
1428                       DAG.getConstant(N1C->getAPIntValue().logBase2(),
1429                                       getShiftAmountTy()));
1430  // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
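  // e.g. (udiv x, (shl 4, y)) -> (srl x, (add y, 2)).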
1431  if (N1.getOpcode() == ISD::SHL) {
1432    if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
1433      if (SHC->getAPIntValue().isPowerOf2()) {
1434        EVT ADDVT = N1.getOperand(1).getValueType();
1435        SDValue Add = DAG.getNode(ISD::ADD, N->getDebugLoc(), ADDVT,
1436                                  N1.getOperand(1),
1437                                  DAG.getConstant(SHC->getAPIntValue()
1438                                                                  .logBase2(),
1439                                                  ADDVT));
1440        AddToWorkList(Add.getNode());
1441        return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, Add);
1442      }
1443    }
1444  }
1445  // fold (udiv x, c) -> alternate
1446  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) {
1447    SDValue Op = BuildUDIV(N);
1448    if (Op.getNode()) return Op;
1449  }
1450
1451  // undef / X -> 0
1452  if (N0.getOpcode() == ISD::UNDEF)
1453    return DAG.getConstant(0, VT);
1454  // X / undef -> undef
1455  if (N1.getOpcode() == ISD::UNDEF)
1456    return N1;
1457
1458  return SDValue();
1459}
1460
1461SDValue DAGCombiner::visitSREM(SDNode *N) {
1462  SDValue N0 = N->getOperand(0);
1463  SDValue N1 = N->getOperand(1);
1464  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1465  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1466  EVT VT = N->getValueType(0);
1467
1468  // fold (srem c1, c2) -> c1%c2
1469  if (N0C && N1C && !N1C->isNullValue())
1470    return DAG.FoldConstantArithmetic(ISD::SREM, VT, N0C, N1C);
1471  // If we know the sign bits of both operands are zero, strength reduce to a
1472  // urem instead.  Handles (X & 0x0FFFFFFF) %s 16 -> X&15
1473  if (!VT.isVector()) {
1474    if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
1475      return DAG.getNode(ISD::UREM, N->getDebugLoc(), VT, N0, N1);
1476  }
1477
1478  // If X/C can be simplified by the division-by-constant logic, lower
1479  // X%C to the equivalent of X-X/C*C.
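  // e.g. X % 7 becomes X - (X/7)*7 when the X/7 node is itself combined
  // (typically into a multiply by a magic constant).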
1480  if (N1C && !N1C->isNullValue()) {
1481    SDValue Div = DAG.getNode(ISD::SDIV, N->getDebugLoc(), VT, N0, N1);
1482    AddToWorkList(Div.getNode());
1483    SDValue OptimizedDiv = combine(Div.getNode());
1484    if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
1485      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1486                                OptimizedDiv, N1);
1487      SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul);
1488      AddToWorkList(Mul.getNode());
1489      return Sub;
1490    }
1491  }
1492
1493  // undef % X -> 0
1494  if (N0.getOpcode() == ISD::UNDEF)
1495    return DAG.getConstant(0, VT);
1496  // X % undef -> undef
1497  if (N1.getOpcode() == ISD::UNDEF)
1498    return N1;
1499
1500  return SDValue();
1501}
1502
1503SDValue DAGCombiner::visitUREM(SDNode *N) {
1504  SDValue N0 = N->getOperand(0);
1505  SDValue N1 = N->getOperand(1);
1506  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1507  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1508  EVT VT = N->getValueType(0);
1509
1510  // fold (urem c1, c2) -> c1%c2
1511  if (N0C && N1C && !N1C->isNullValue())
1512    return DAG.FoldConstantArithmetic(ISD::UREM, VT, N0C, N1C);
1513  // fold (urem x, pow2) -> (and x, pow2-1)
1514  if (N1C && !N1C->isNullValue() && N1C->getAPIntValue().isPowerOf2())
1515    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0,
1516                       DAG.getConstant(N1C->getAPIntValue()-1,VT));
1517  // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
1518  if (N1.getOpcode() == ISD::SHL) {
1519    if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
1520      if (SHC->getAPIntValue().isPowerOf2()) {
1521        SDValue Add =
1522          DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1,
1523                 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()),
1524                                 VT));
1525        AddToWorkList(Add.getNode());
1526        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, Add);
1527      }
1528    }
1529  }
1530
1531  // If X/C can be simplified by the division-by-constant logic, lower
1532  // X%C to the equivalent of X-X/C*C.
1533  if (N1C && !N1C->isNullValue()) {
1534    SDValue Div = DAG.getNode(ISD::UDIV, N->getDebugLoc(), VT, N0, N1);
1535    AddToWorkList(Div.getNode());
1536    SDValue OptimizedDiv = combine(Div.getNode());
1537    if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
1538      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1539                                OptimizedDiv, N1);
1540      SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul);
1541      AddToWorkList(Mul.getNode());
1542      return Sub;
1543    }
1544  }
1545
1546  // undef % X -> 0
1547  if (N0.getOpcode() == ISD::UNDEF)
1548    return DAG.getConstant(0, VT);
1549  // X % undef -> undef
1550  if (N1.getOpcode() == ISD::UNDEF)
1551    return N1;
1552
1553  return SDValue();
1554}
1555
1556SDValue DAGCombiner::visitMULHS(SDNode *N) {
1557  SDValue N0 = N->getOperand(0);
1558  SDValue N1 = N->getOperand(1);
1559  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1560  EVT VT = N->getValueType(0);
1561
1562  // fold (mulhs x, 0) -> 0
1563  if (N1C && N1C->isNullValue())
1564    return N1;
1565  // fold (mulhs x, 1) -> (sra x, size(x)-1)
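  // (The high half of the widened signed product x*1 is just the sign bit of
  // x replicated, hence the arithmetic shift.)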
1566  if (N1C && N1C->getAPIntValue() == 1)
1567    return DAG.getNode(ISD::SRA, N->getDebugLoc(), N0.getValueType(), N0,
1568                       DAG.getConstant(N0.getValueType().getSizeInBits() - 1,
1569                                       getShiftAmountTy()));
1570  // fold (mulhs x, undef) -> 0
1571  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1572    return DAG.getConstant(0, VT);
1573
1574  return SDValue();
1575}
1576
1577SDValue DAGCombiner::visitMULHU(SDNode *N) {
1578  SDValue N0 = N->getOperand(0);
1579  SDValue N1 = N->getOperand(1);
1580  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1581  EVT VT = N->getValueType(0);
1582
1583  // fold (mulhu x, 0) -> 0
1584  if (N1C && N1C->isNullValue())
1585    return N1;
1586  // fold (mulhu x, 1) -> 0
1587  if (N1C && N1C->getAPIntValue() == 1)
1588    return DAG.getConstant(0, N0.getValueType());
1589  // fold (mulhu x, undef) -> 0
1590  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1591    return DAG.getConstant(0, VT);
1592
1593  return SDValue();
1594}
1595
1596/// SimplifyNodeWithTwoResults - Perform optimizations common to nodes that
1597/// compute two values. LoOp and HiOp give the opcodes for the two computations
1598/// that are being performed. Return the simplified value, or a null SDValue.
1599///
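/// For example, if only the low result of an SMUL_LOHI is used, the node can
/// be replaced by a plain MUL of the same operands.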
1600SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
1601                                                unsigned HiOp) {
1602  // If the high half is not needed, just compute the low half.
1603  bool HiExists = N->hasAnyUseOfValue(1);
1604  if (!HiExists &&
1605      (!LegalOperations ||
1606       TLI.isOperationLegal(LoOp, N->getValueType(0)))) {
1607    SDValue Res = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0),
1608                              N->op_begin(), N->getNumOperands());
1609    return CombineTo(N, Res, Res);
1610  }
1611
1612  // If the low half is not needed, just compute the high half.
1613  bool LoExists = N->hasAnyUseOfValue(0);
1614  if (!LoExists &&
1615      (!LegalOperations ||
1616       TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
1617    SDValue Res = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1),
1618                              N->op_begin(), N->getNumOperands());
1619    return CombineTo(N, Res, Res);
1620  }
1621
1622  // If both halves are used, return the node as it is.
1623  if (LoExists && HiExists)
1624    return SDValue();
1625
1626  // If the two computed results can be simplified separately, separate them.
1627  if (LoExists) {
1628    SDValue Lo = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0),
1629                             N->op_begin(), N->getNumOperands());
1630    AddToWorkList(Lo.getNode());
1631    SDValue LoOpt = combine(Lo.getNode());
1632    if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
1633        (!LegalOperations ||
1634         TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
1635      return CombineTo(N, LoOpt, LoOpt);
1636  }
1637
1638  if (HiExists) {
1639    SDValue Hi = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1),
1640                             N->op_begin(), N->getNumOperands());
1641    AddToWorkList(Hi.getNode());
1642    SDValue HiOpt = combine(Hi.getNode());
1643    if (HiOpt.getNode() && HiOpt != Hi &&
1644        (!LegalOperations ||
1645         TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
1646      return CombineTo(N, HiOpt, HiOpt);
1647  }
1648
1649  return SDValue();
1650}
1651
1652SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
1653  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS);
1654  if (Res.getNode()) return Res;
1655
1656  return SDValue();
1657}
1658
1659SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
1660  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU);
1661  if (Res.getNode()) return Res;
1662
1663  return SDValue();
1664}
1665
1666SDValue DAGCombiner::visitSDIVREM(SDNode *N) {
1667  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM);
1668  if (Res.getNode()) return Res;
1669
1670  return SDValue();
1671}
1672
1673SDValue DAGCombiner::visitUDIVREM(SDNode *N) {
1674  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM);
1675  if (Res.getNode()) return Res;
1676
1677  return SDValue();
1678}
1679
1680/// SimplifyBinOpWithSameOpcodeHands - If this is a binary operator with
1681/// two operands of the same opcode, try to simplify it.
1682SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
1683  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
1684  EVT VT = N0.getValueType();
1685  assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
1686
1687  // For each of OP in AND/OR/XOR:
1688  // fold (OP (zext x), (zext y)) -> (zext (OP x, y))
1689  // fold (OP (sext x), (sext y)) -> (sext (OP x, y))
1690  // fold (OP (aext x), (aext y)) -> (aext (OP x, y))
1691  // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
1692  if ((N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND||
1693       N0.getOpcode() == ISD::SIGN_EXTEND ||
1694       (N0.getOpcode() == ISD::TRUNCATE &&
1695        !TLI.isTruncateFree(N0.getOperand(0).getValueType(), VT))) &&
1696      N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() &&
1697      (!LegalOperations ||
1698       TLI.isOperationLegal(N->getOpcode(), N0.getOperand(0).getValueType()))) {
1699    SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(),
1700                                 N0.getOperand(0).getValueType(),
1701                                 N0.getOperand(0), N1.getOperand(0));
1702    AddToWorkList(ORNode.getNode());
1703    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, ORNode);
1704  }
1705
1706  // For each of OP in SHL/SRL/SRA/AND...
1707  //   fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
1708  //   fold (or  (OP x, z), (OP y, z)) -> (OP (or  x, y), z)
1709  //   fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
1710  if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
1711       N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
1712      N0.getOperand(1) == N1.getOperand(1)) {
1713    SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(),
1714                                 N0.getOperand(0).getValueType(),
1715                                 N0.getOperand(0), N1.getOperand(0));
1716    AddToWorkList(ORNode.getNode());
1717    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
1718                       ORNode, N0.getOperand(1));
1719  }
1720
1721  return SDValue();
1722}
1723
1724SDValue DAGCombiner::visitAND(SDNode *N) {
1725  SDValue N0 = N->getOperand(0);
1726  SDValue N1 = N->getOperand(1);
1727  SDValue LL, LR, RL, RR, CC0, CC1;
1728  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1729  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1730  EVT VT = N1.getValueType();
1731  unsigned BitWidth = VT.getSizeInBits();
1732
1733  // fold vector ops
1734  if (VT.isVector()) {
1735    SDValue FoldedVOp = SimplifyVBinOp(N);
1736    if (FoldedVOp.getNode()) return FoldedVOp;
1737  }
1738
1739  // fold (and x, undef) -> 0
1740  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1741    return DAG.getConstant(0, VT);
1742  // fold (and c1, c2) -> c1&c2
1743  if (N0C && N1C)
1744    return DAG.FoldConstantArithmetic(ISD::AND, VT, N0C, N1C);
1745  // canonicalize constant to RHS
1746  if (N0C && !N1C)
1747    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N1, N0);
1748  // fold (and x, -1) -> x
1749  if (N1C && N1C->isAllOnesValue())
1750    return N0;
1751  // if (and x, c) is known to be zero, return 0
1752  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
1753                                   APInt::getAllOnesValue(BitWidth)))
1754    return DAG.getConstant(0, VT);
1755  // reassociate and
1756  SDValue RAND = ReassociateOps(ISD::AND, N->getDebugLoc(), N0, N1);
1757  if (RAND.getNode() != 0)
1758    return RAND;
1759  // fold (and (or x, 0xFFFF), 0xFF) -> 0xFF
1760  if (N1C && N0.getOpcode() == ISD::OR)
1761    if (ConstantSDNode *ORI = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
1762      if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue())
1763        return N1;
1764  // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
1765  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
1766    SDValue N0Op0 = N0.getOperand(0);
1767    APInt Mask = ~N1C->getAPIntValue();
1768    Mask.trunc(N0Op0.getValueSizeInBits());
1769    if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
1770      SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(),
1771                                 N0.getValueType(), N0Op0);
1772
1773      // Replace uses of the AND with uses of the Zero extend node.
1774      CombineTo(N, Zext);
1775
1776      // We actually want to replace all uses of the any_extend with the
1777      // zero_extend, to avoid duplicating things.  This will later cause this
1778      // AND to be folded.
1779      CombineTo(N0.getNode(), Zext);
1780      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1781    }
1782  }
1783  // fold (and (setcc x), (setcc y)) -> (setcc (and x, y))
1784  if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
1785    ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
1786    ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
1787
1788    if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
1789        LL.getValueType().isInteger()) {
1790      // fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0)
1791      if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) {
1792        SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(),
1793                                     LR.getValueType(), LL, RL);
1794        AddToWorkList(ORNode.getNode());
1795        return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
1796      }
1797      // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1)
1798      if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) {
1799        SDValue ANDNode = DAG.getNode(ISD::AND, N0.getDebugLoc(),
1800                                      LR.getValueType(), LL, RL);
1801        AddToWorkList(ANDNode.getNode());
1802        return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1);
1803      }
1804      // fold (and (setgt X,  -1), (setgt Y,  -1)) -> (setgt (or X, Y), -1)
1805      if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) {
1806        SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(),
1807                                     LR.getValueType(), LL, RL);
1808        AddToWorkList(ORNode.getNode());
1809        return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
1810      }
1811    }
1812    // canonicalize equivalent to ll == rl
1813    if (LL == RR && LR == RL) {
1814      Op1 = ISD::getSetCCSwappedOperands(Op1);
1815      std::swap(RL, RR);
1816    }
1817    if (LL == RL && LR == RR) {
1818      bool isInteger = LL.getValueType().isInteger();
1819      ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger);
1820      if (Result != ISD::SETCC_INVALID &&
1821          (!LegalOperations || TLI.isCondCodeLegal(Result, LL.getValueType())))
1822        return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(),
1823                            LL, LR, Result);
1824    }
1825  }
1826
1827  // Simplify: (and (op x...), (op y...))  -> (op (and x, y))
1828  if (N0.getOpcode() == N1.getOpcode()) {
1829    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
1830    if (Tmp.getNode()) return Tmp;
1831  }
1832
1833  // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
1834  // fold (and (sra)) -> (and (srl)) when possible.
1835  if (!VT.isVector() &&
1836      SimplifyDemandedBits(SDValue(N, 0)))
1837    return SDValue(N, 0);
1838  // fold (zext_inreg (extload x)) -> (zextload x)
1839  if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) {
1840    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
1841    EVT MemVT = LN0->getMemoryVT();
1842    // If we zero all the possible extended bits, then we can turn this into
1843    // a zextload if we are running before legalize or the operation is legal.
1844    unsigned BitWidth = N1.getValueSizeInBits();
1845    if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
1846                                     BitWidth - MemVT.getSizeInBits())) &&
1847        ((!LegalOperations && !LN0->isVolatile()) ||
1848         TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
1849      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
1850                                       LN0->getChain(), LN0->getBasePtr(),
1851                                       LN0->getSrcValue(),
1852                                       LN0->getSrcValueOffset(), MemVT,
1853                                       LN0->isVolatile(), LN0->getAlignment());
1854      AddToWorkList(N);
1855      CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
1856      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1857    }
1858  }
1859  // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
1860  if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
1861      N0.hasOneUse()) {
1862    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
1863    EVT MemVT = LN0->getMemoryVT();
1864    // If we zero all the possible extended bits, then we can turn this into
1865    // a zextload if we are running before legalize or the operation is legal.
1866    unsigned BitWidth = N1.getValueSizeInBits();
1867    if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
1868                                     BitWidth - MemVT.getSizeInBits())) &&
1869        ((!LegalOperations && !LN0->isVolatile()) ||
1870         TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
1871      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
1872                                       LN0->getChain(),
1873                                       LN0->getBasePtr(), LN0->getSrcValue(),
1874                                       LN0->getSrcValueOffset(), MemVT,
1875                                       LN0->isVolatile(), LN0->getAlignment());
1876      AddToWorkList(N);
1877      CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
1878      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1879    }
1880  }
1881
1882  // fold (and (load x), 255) -> (zextload x, i8)
1883  // fold (and (extload x, i16), 255) -> (zextload x, i8)
1884  if (N1C && N0.getOpcode() == ISD::LOAD) {
1885    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
1886    if (LN0->getExtensionType() != ISD::SEXTLOAD &&
1887        LN0->isUnindexed() && N0.hasOneUse() &&
1888        // Do not change the width of a volatile load.
1889        !LN0->isVolatile()) {
1890      EVT ExtVT = MVT::Other;
1891      uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits();
1892      if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue()))
1893        ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
1894
1895      EVT LoadedVT = LN0->getMemoryVT();
1896
1897      // Do not generate loads of non-round integer types since these can
1898      // be expensive (and would be wrong if the type is not byte sized).
1899      if (ExtVT != MVT::Other && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() &&
1900          (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
1901        EVT PtrType = N0.getOperand(1).getValueType();
1902
1903        // For big endian targets, we need to add an offset to the pointer to
1904        // load the correct bytes.  For little endian systems, we merely need to
1905        // read fewer bytes from the same pointer.
1906        unsigned LVTStoreBytes = LoadedVT.getStoreSize();
1907        unsigned EVTStoreBytes = ExtVT.getStoreSize();
1908        unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;
1909        unsigned Alignment = LN0->getAlignment();
1910        SDValue NewPtr = LN0->getBasePtr();
1911
1912        if (TLI.isBigEndian()) {
1913          NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(), PtrType,
1914                               NewPtr, DAG.getConstant(PtrOff, PtrType));
1915          Alignment = MinAlign(Alignment, PtrOff);
1916        }
1917
1918        AddToWorkList(NewPtr.getNode());
1919        SDValue Load =
1920          DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), VT, LN0->getChain(),
1921                         NewPtr, LN0->getSrcValue(), LN0->getSrcValueOffset(),
1922                         ExtVT, LN0->isVolatile(), Alignment);
1923        AddToWorkList(N);
1924        CombineTo(N0.getNode(), Load, Load.getValue(1));
1925        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1926      }
1927    }
1928  }
1929
1930  return SDValue();
1931}
1932
1933SDValue DAGCombiner::visitOR(SDNode *N) {
1934  SDValue N0 = N->getOperand(0);
1935  SDValue N1 = N->getOperand(1);
1936  SDValue LL, LR, RL, RR, CC0, CC1;
1937  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1938  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1939  EVT VT = N1.getValueType();
1940
1941  // fold vector ops
1942  if (VT.isVector()) {
1943    SDValue FoldedVOp = SimplifyVBinOp(N);
1944    if (FoldedVOp.getNode()) return FoldedVOp;
1945  }
1946
1947  // fold (or x, undef) -> -1
1948  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1949    return DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
1950  // fold (or c1, c2) -> c1|c2
1951  if (N0C && N1C)
1952    return DAG.FoldConstantArithmetic(ISD::OR, VT, N0C, N1C);
1953  // canonicalize constant to RHS
1954  if (N0C && !N1C)
1955    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N1, N0);
1956  // fold (or x, 0) -> x
1957  if (N1C && N1C->isNullValue())
1958    return N0;
1959  // fold (or x, -1) -> -1
1960  if (N1C && N1C->isAllOnesValue())
1961    return N1;
1962  // fold (or x, c) -> c iff (x & ~c) == 0
1963  if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
1964    return N1;
1965  // reassociate or
1966  SDValue ROR = ReassociateOps(ISD::OR, N->getDebugLoc(), N0, N1);
1967  if (ROR.getNode() != 0)
1968    return ROR;
1969  // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
1970  if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
1971             isa<ConstantSDNode>(N0.getOperand(1))) {
1972    ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
1973    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
1974                       DAG.getNode(ISD::OR, N0.getDebugLoc(), VT,
1975                                   N0.getOperand(0), N1),
1976                       DAG.FoldConstantArithmetic(ISD::OR, VT, N1C, C1));
1977  }
1978  // fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
1979  if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
1980    ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
1981    ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
1982
1983    if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
1984        LL.getValueType().isInteger()) {
1985      // fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
1986      // fold (or (setlt X, 0), (setlt Y, 0)) -> (setlt (or X, Y), 0)
1987      if (cast<ConstantSDNode>(LR)->isNullValue() &&
1988          (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
1989        SDValue ORNode = DAG.getNode(ISD::OR, LR.getDebugLoc(),
1990                                     LR.getValueType(), LL, RL);
1991        AddToWorkList(ORNode.getNode());
1992        return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
1993      }
1994      // fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
1995      // fold (or (setgt X, -1), (setgt Y, -1)) -> (setgt (and X, Y), -1)
1996      if (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
1997          (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
1998        SDValue ANDNode = DAG.getNode(ISD::AND, LR.getDebugLoc(),
1999                                      LR.getValueType(), LL, RL);
2000        AddToWorkList(ANDNode.getNode());
2001        return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1);
2002      }
2003    }
2004    // canonicalize equivalent to ll == rl
2005    if (LL == RR && LR == RL) {
2006      Op1 = ISD::getSetCCSwappedOperands(Op1);
2007      std::swap(RL, RR);
2008    }
2009    if (LL == RL && LR == RR) {
2010      bool isInteger = LL.getValueType().isInteger();
2011      ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
2012      if (Result != ISD::SETCC_INVALID &&
2013          (!LegalOperations || TLI.isCondCodeLegal(Result, LL.getValueType())))
2014        return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(),
2015                            LL, LR, Result);
2016    }
2017  }
2018
2019  // Simplify: (or (op x...), (op y...))  -> (op (or x, y))
2020  if (N0.getOpcode() == N1.getOpcode()) {
2021    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
2022    if (Tmp.getNode()) return Tmp;
2023  }
2024
2025  // (or (and X, C1), (and Y, C2))  -> (and (or X, Y), C3) if possible.
2026  if (N0.getOpcode() == ISD::AND &&
2027      N1.getOpcode() == ISD::AND &&
2028      N0.getOperand(1).getOpcode() == ISD::Constant &&
2029      N1.getOperand(1).getOpcode() == ISD::Constant &&
2030      // Don't increase # computations.
2031      (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
2032    // We can only do this xform if we know that bits from X that are set in C2
2033    // but not in C1 are already zero.  Likewise for Y.
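    // e.g. (or (and X, 0xFF00), (and Y, 0x00FF)) -> (and (or X, Y), 0xFFFF)
    // when the low byte of X and the high byte of Y are known to be zero.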
2034    const APInt &LHSMask =
2035      cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
2036    const APInt &RHSMask =
2037      cast<ConstantSDNode>(N1.getOperand(1))->getAPIntValue();
2038
2039    if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
2040        DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
2041      SDValue X = DAG.getNode(ISD::OR, N0.getDebugLoc(), VT,
2042                              N0.getOperand(0), N1.getOperand(0));
2043      return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, X,
2044                         DAG.getConstant(LHSMask | RHSMask, VT));
2045    }
2046  }
2047
2048  // See if this is some rotate idiom.
2049  if (SDNode *Rot = MatchRotate(N0, N1, N->getDebugLoc()))
2050    return SDValue(Rot, 0);
2051
2052  return SDValue();
2053}
2054
2055/// MatchRotateHalf - Match "(X shl/srl V1) & V2" where V2 may not be present.
2056static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
2057  if (Op.getOpcode() == ISD::AND) {
2058    if (isa<ConstantSDNode>(Op.getOperand(1))) {
2059      Mask = Op.getOperand(1);
2060      Op = Op.getOperand(0);
2061    } else {
2062      return false;
2063    }
2064  }
2065
2066  if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
2067    Shift = Op;
2068    return true;
2069  }
2070
2071  return false;
2072}
2073
2074// MatchRotate - Handle an 'or' of two operands.  If this is one of the many
2075// idioms for rotate, and if the target supports rotation instructions, generate
2076// a rot[lr].
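// e.g. on i32, (or (shl x, 8), (srl x, 24)) becomes (rotl x, 8) (equivalently
// (rotr x, 24)), and (or (shl x, y), (srl x, (sub 32, y))) becomes (rotl x, y).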
2077SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) {
2078  // Must be a legal type.  Expanded and promoted types won't work with rotates.
2079  EVT VT = LHS.getValueType();
2080  if (!TLI.isTypeLegal(VT)) return 0;
2081
2082  // The target must have at least one rotate flavor.
2083  bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT);
2084  bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT);
2085  if (!HasROTL && !HasROTR) return 0;
2086
2087  // Match "(X shl/srl V1) & V2" where V2 may not be present.
2088  SDValue LHSShift;   // The shift.
2089  SDValue LHSMask;    // AND value if any.
2090  if (!MatchRotateHalf(LHS, LHSShift, LHSMask))
2091    return 0; // Not part of a rotate.
2092
2093  SDValue RHSShift;   // The shift.
2094  SDValue RHSMask;    // AND value if any.
2095  if (!MatchRotateHalf(RHS, RHSShift, RHSMask))
2096    return 0; // Not part of a rotate.
2097
2098  if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
2099    return 0;   // Not shifting the same value.
2100
2101  if (LHSShift.getOpcode() == RHSShift.getOpcode())
2102    return 0;   // Shifts must disagree.
2103
2104  // Canonicalize shl to left side in a shl/srl pair.
2105  if (RHSShift.getOpcode() == ISD::SHL) {
2106    std::swap(LHS, RHS);
2107    std::swap(LHSShift, RHSShift);
2108    std::swap(LHSMask , RHSMask );
2109  }
2110
2111  unsigned OpSizeInBits = VT.getSizeInBits();
2112  SDValue LHSShiftArg = LHSShift.getOperand(0);
2113  SDValue LHSShiftAmt = LHSShift.getOperand(1);
2114  SDValue RHSShiftAmt = RHSShift.getOperand(1);
2115
2116  // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
2117  // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
2118  if (LHSShiftAmt.getOpcode() == ISD::Constant &&
2119      RHSShiftAmt.getOpcode() == ISD::Constant) {
2120    uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getZExtValue();
2121    uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getZExtValue();
2122    if ((LShVal + RShVal) != OpSizeInBits)
2123      return 0;
2124
2125    SDValue Rot;
2126    if (HasROTL)
2127      Rot = DAG.getNode(ISD::ROTL, DL, VT, LHSShiftArg, LHSShiftAmt);
2128    else
2129      Rot = DAG.getNode(ISD::ROTR, DL, VT, LHSShiftArg, RHSShiftAmt);
2130
2131    // If there is an AND of either shifted operand, apply it to the result.
2132    if (LHSMask.getNode() || RHSMask.getNode()) {
2133      APInt Mask = APInt::getAllOnesValue(OpSizeInBits);
2134
2135      if (LHSMask.getNode()) {
2136        APInt RHSBits = APInt::getLowBitsSet(OpSizeInBits, LShVal);
2137        Mask &= cast<ConstantSDNode>(LHSMask)->getAPIntValue() | RHSBits;
2138      }
2139      if (RHSMask.getNode()) {
2140        APInt LHSBits = APInt::getHighBitsSet(OpSizeInBits, RShVal);
2141        Mask &= cast<ConstantSDNode>(RHSMask)->getAPIntValue() | LHSBits;
2142      }
2143
2144      Rot = DAG.getNode(ISD::AND, DL, VT, Rot, DAG.getConstant(Mask, VT));
2145    }
2146
2147    return Rot.getNode();
2148  }
2149
2150  // If there is a mask here, and we have a variable shift, we can't be sure
2151  // that we're masking out the right stuff.
2152  if (LHSMask.getNode() || RHSMask.getNode())
2153    return 0;
2154
2155  // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotl x, y)
2156  // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotr x, (sub 32, y))
2157  if (RHSShiftAmt.getOpcode() == ISD::SUB &&
2158      LHSShiftAmt == RHSShiftAmt.getOperand(1)) {
2159    if (ConstantSDNode *SUBC =
2160          dyn_cast<ConstantSDNode>(RHSShiftAmt.getOperand(0))) {
2161      if (SUBC->getAPIntValue() == OpSizeInBits) {
2162        if (HasROTL)
2163          return DAG.getNode(ISD::ROTL, DL, VT,
2164                             LHSShiftArg, LHSShiftAmt).getNode();
2165        else
2166          return DAG.getNode(ISD::ROTR, DL, VT,
2167                             LHSShiftArg, RHSShiftAmt).getNode();
2168      }
2169    }
2170  }
2171
2172  // fold (or (shl x, (sub 32, y)), (srl x, y)) -> (rotr x, y)
2173  // fold (or (shl x, (sub 32, y)), (srl x, y)) -> (rotl x, (sub 32, y))
2174  if (LHSShiftAmt.getOpcode() == ISD::SUB &&
2175      RHSShiftAmt == LHSShiftAmt.getOperand(1)) {
2176    if (ConstantSDNode *SUBC =
2177          dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0))) {
2178      if (SUBC->getAPIntValue() == OpSizeInBits) {
2179        if (HasROTR)
2180          return DAG.getNode(ISD::ROTR, DL, VT,
2181                             LHSShiftArg, RHSShiftAmt).getNode();
2182        else
2183          return DAG.getNode(ISD::ROTL, DL, VT,
2184                             LHSShiftArg, LHSShiftAmt).getNode();
2185      }
2186    }
2187  }
2188
2189  // Look for sign/zext/any-extended or truncate cases:
2190  if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
2191       || LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
2192       || LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND
2193       || LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
2194      (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
2195       || RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
2196       || RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND
2197       || RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
2198    SDValue LExtOp0 = LHSShiftAmt.getOperand(0);
2199    SDValue RExtOp0 = RHSShiftAmt.getOperand(0);
2200    if (RExtOp0.getOpcode() == ISD::SUB &&
2201        RExtOp0.getOperand(1) == LExtOp0) {
2202      // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
2203      //   (rotl x, y)
2204      // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
2205      //   (rotr x, (sub 32, y))
2206      if (ConstantSDNode *SUBC =
2207            dyn_cast<ConstantSDNode>(RExtOp0.getOperand(0))) {
2208        if (SUBC->getAPIntValue() == OpSizeInBits) {
2209          return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
2210                             LHSShiftArg,
2211                             HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode();
2212        }
2213      }
2214    } else if (LExtOp0.getOpcode() == ISD::SUB &&
2215               RExtOp0 == LExtOp0.getOperand(1)) {
2216      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
2217      //   (rotr x, y)
2218      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
2219      //   (rotl x, (sub 32, y))
2220      if (ConstantSDNode *SUBC =
2221            dyn_cast<ConstantSDNode>(LExtOp0.getOperand(0))) {
2222        if (SUBC->getAPIntValue() == OpSizeInBits) {
2223          return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT,
2224                             LHSShiftArg,
2225                             HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode();
2226        }
2227      }
2228    }
2229  }
2230
2231  return 0;
2232}
2233
2234SDValue DAGCombiner::visitXOR(SDNode *N) {
2235  SDValue N0 = N->getOperand(0);
2236  SDValue N1 = N->getOperand(1);
2237  SDValue LHS, RHS, CC;
2238  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2239  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2240  EVT VT = N0.getValueType();
2241
2242  // fold vector ops
2243  if (VT.isVector()) {
2244    SDValue FoldedVOp = SimplifyVBinOp(N);
2245    if (FoldedVOp.getNode()) return FoldedVOp;
2246  }
2247
2248  // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
2249  if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
2250    return DAG.getConstant(0, VT);
2251  // fold (xor x, undef) -> undef
2252  if (N0.getOpcode() == ISD::UNDEF)
2253    return N0;
2254  if (N1.getOpcode() == ISD::UNDEF)
2255    return N1;
2256  // fold (xor c1, c2) -> c1^c2
2257  if (N0C && N1C)
2258    return DAG.FoldConstantArithmetic(ISD::XOR, VT, N0C, N1C);
2259  // canonicalize constant to RHS
2260  if (N0C && !N1C)
2261    return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0);
2262  // fold (xor x, 0) -> x
2263  if (N1C && N1C->isNullValue())
2264    return N0;
2265  // reassociate xor
2266  SDValue RXOR = ReassociateOps(ISD::XOR, N->getDebugLoc(), N0, N1);
2267  if (RXOR.getNode() != 0)
2268    return RXOR;
2269
2270  // fold !(x cc y) -> (x !cc y)
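  // e.g. (xor (setcc X, Y, setlt), 1) -> (setcc X, Y, setge), provided the
  // inverted condition code is legal for the target (checked below).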
2271  if (N1C && N1C->getAPIntValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) {
2272    bool isInt = LHS.getValueType().isInteger();
2273    ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
2274                                               isInt);
2275
2276    if (!LegalOperations || TLI.isCondCodeLegal(NotCC, LHS.getValueType())) {
2277      switch (N0.getOpcode()) {
2278      default:
2279        llvm_unreachable("Unhandled SetCC Equivalent!");
2280      case ISD::SETCC:
2281        return DAG.getSetCC(N->getDebugLoc(), VT, LHS, RHS, NotCC);
2282      case ISD::SELECT_CC:
2283        return DAG.getSelectCC(N->getDebugLoc(), LHS, RHS, N0.getOperand(2),
2284                               N0.getOperand(3), NotCC);
2285      }
2286    }
2287  }
2288
2289  // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
2290  if (N1C && N1C->getAPIntValue() == 1 && N0.getOpcode() == ISD::ZERO_EXTEND &&
2291      N0.getNode()->hasOneUse() &&
2292      isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
2293    SDValue V = N0.getOperand(0);
2294    V = DAG.getNode(ISD::XOR, N0.getDebugLoc(), V.getValueType(), V,
2295                    DAG.getConstant(1, V.getValueType()));
2296    AddToWorkList(V.getNode());
2297    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, V);
2298  }
2299
2300  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
2301  if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 &&
2302      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
2303    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
2304    if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
2305      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
2306      LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS
2307      RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS
2308      AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
2309      return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS);
2310    }
2311  }
2312  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
2313  if (N1C && N1C->isAllOnesValue() &&
2314      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
2315    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
2316    if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
2317      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
2318      LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS
2319      RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS
2320      AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
2321      return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS);
2322    }
2323  }
2324  // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
2325  if (N1C && N0.getOpcode() == ISD::XOR) {
2326    ConstantSDNode *N00C = dyn_cast<ConstantSDNode>(N0.getOperand(0));
2327    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2328    if (N00C)
2329      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(1),
2330                         DAG.getConstant(N1C->getAPIntValue() ^
2331                                         N00C->getAPIntValue(), VT));
2332    if (N01C)
2333      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(0),
2334                         DAG.getConstant(N1C->getAPIntValue() ^
2335                                         N01C->getAPIntValue(), VT));
2336  }
2337  // fold (xor x, x) -> 0
2338  if (N0 == N1) {
2339    if (!VT.isVector()) {
2340      return DAG.getConstant(0, VT);
2341    } else if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)){
2342      // Produce a vector of zeros.
2343      SDValue El = DAG.getConstant(0, VT.getVectorElementType());
2344      std::vector<SDValue> Ops(VT.getVectorNumElements(), El);
2345      return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
2346                         &Ops[0], Ops.size());
2347    }
2348  }
2349
2350  // Simplify: xor (op x...), (op y...)  -> (op (xor x, y))
2351  if (N0.getOpcode() == N1.getOpcode()) {
2352    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
2353    if (Tmp.getNode()) return Tmp;
2354  }
2355
2356  // Simplify the expression using non-local knowledge.
2357  if (!VT.isVector() &&
2358      SimplifyDemandedBits(SDValue(N, 0)))
2359    return SDValue(N, 0);
2360
2361  return SDValue();
2362}
2363
2364/// visitShiftByConstant - Handle transforms common to the three shifts, when
2365/// the shift amount is a constant.
2366SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
2367  SDNode *LHS = N->getOperand(0).getNode();
2368  if (!LHS->hasOneUse()) return SDValue();
2369
2370  // We want to pull some binops through shifts, so that we have (and (shift))
2371  // instead of (shift (and)), likewise for add, or, xor, etc.  This sort of
2372  // thing happens with address calculations, so it's important to canonicalize
2373  // it.
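  // e.g. (shl (add x, C1), C2) -> (add (shl x, C2), (shl C1, C2)), subject to
  // the checks below.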
2374  bool HighBitSet = false;  // Can we transform this if the high bit is set?
2375
2376  switch (LHS->getOpcode()) {
2377  default: return SDValue();
2378  case ISD::OR:
2379  case ISD::XOR:
2380    HighBitSet = false; // We can only transform sra if the high bit is clear.
2381    break;
2382  case ISD::AND:
2383    HighBitSet = true;  // We can only transform sra if the high bit is set.
2384    break;
2385  case ISD::ADD:
2386    if (N->getOpcode() != ISD::SHL)
2387      return SDValue(); // only shl(add) not sr[al](add).
2388    HighBitSet = false; // We can only transform sra if the high bit is clear.
2389    break;
2390  }
2391
2392  // We require the RHS of the binop to be a constant as well.
2393  ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
2394  if (!BinOpCst) return SDValue();
2395
2396  // FIXME: disable this unless the input to the binop is a shift by a constant.
2397  // If it is not a shift, it pessimizes some common cases like:
2398  //
2399  //    void foo(int *X, int i) { X[i & 1235] = 1; }
2400  //    int bar(int *X, int i) { return X[i & 255]; }
2401  SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
2402  if ((BinOpLHSVal->getOpcode() != ISD::SHL &&
2403       BinOpLHSVal->getOpcode() != ISD::SRA &&
2404       BinOpLHSVal->getOpcode() != ISD::SRL) ||
2405      !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
2406    return SDValue();
2407
2408  EVT VT = N->getValueType(0);
2409
2410  // If this is a signed shift right, and the high bit is modified by the
2411  // logical operation, do not perform the transformation. The HighBitSet
2412  // boolean indicates the value of the high bit of the constant which would
2413  // cause it to be modified for this operation.
2414  if (N->getOpcode() == ISD::SRA) {
2415    bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
2416    if (BinOpRHSSignSet != HighBitSet)
2417      return SDValue();
2418  }
2419
2420  // Fold the constants, shifting the binop RHS by the shift amount.
2421  SDValue NewRHS = DAG.getNode(N->getOpcode(), LHS->getOperand(1).getDebugLoc(),
2422                               N->getValueType(0),
2423                               LHS->getOperand(1), N->getOperand(1));
2424
2425  // Create the new shift.
2426  SDValue NewShift = DAG.getNode(N->getOpcode(), LHS->getOperand(0).getDebugLoc(),
2427                                 VT, LHS->getOperand(0), N->getOperand(1));
2428
2429  // Create the new binop.
2430  return DAG.getNode(LHS->getOpcode(), N->getDebugLoc(), VT, NewShift, NewRHS);
2431}
2432
2433SDValue DAGCombiner::visitSHL(SDNode *N) {
2434  SDValue N0 = N->getOperand(0);
2435  SDValue N1 = N->getOperand(1);
2436  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2437  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2438  EVT VT = N0.getValueType();
2439  unsigned OpSizeInBits = VT.getSizeInBits();
2440
2441  // fold (shl c1, c2) -> c1<<c2
2442  if (N0C && N1C)
2443    return DAG.FoldConstantArithmetic(ISD::SHL, VT, N0C, N1C);
2444  // fold (shl 0, x) -> 0
2445  if (N0C && N0C->isNullValue())
2446    return N0;
2447  // fold (shl x, c >= size(x)) -> undef
2448  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
2449    return DAG.getUNDEF(VT);
2450  // fold (shl x, 0) -> x
2451  if (N1C && N1C->isNullValue())
2452    return N0;
2453  // if (shl x, c) is known to be zero, return 0
2454  if (DAG.MaskedValueIsZero(SDValue(N, 0),
2455                            APInt::getAllOnesValue(VT.getSizeInBits())))
2456    return DAG.getConstant(0, VT);
2457  // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
2458  if (N1.getOpcode() == ISD::TRUNCATE &&
2459      N1.getOperand(0).getOpcode() == ISD::AND &&
2460      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
2461    SDValue N101 = N1.getOperand(0).getOperand(1);
2462    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
2463      EVT TruncVT = N1.getValueType();
2464      SDValue N100 = N1.getOperand(0).getOperand(0);
2465      APInt TruncC = N101C->getAPIntValue();
2466      TruncC.trunc(TruncVT.getSizeInBits());
2467      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
2468                         DAG.getNode(ISD::AND, N->getDebugLoc(), TruncVT,
2469                                     DAG.getNode(ISD::TRUNCATE,
2470                                                 N->getDebugLoc(),
2471                                                 TruncVT, N100),
2472                                     DAG.getConstant(TruncC, TruncVT)));
2473    }
2474  }
2475
2476  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
2477    return SDValue(N, 0);
2478
2479  // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
2480  if (N1C && N0.getOpcode() == ISD::SHL &&
2481      N0.getOperand(1).getOpcode() == ISD::Constant) {
2482    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
2483    uint64_t c2 = N1C->getZExtValue();
2484    if (c1 + c2 >= OpSizeInBits)
2485      return DAG.getConstant(0, VT);
2486    return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0.getOperand(0),
2487                       DAG.getConstant(c1 + c2, N1.getValueType()));
2488  }
2489  // fold (shl (srl x, c1), c2) -> (shl (and x, (shl -1, c1)), (sub c2, c1)) or
2490  //                               (srl (and x, (shl -1, c1)), (sub c1, c2))
2491  if (N1C && N0.getOpcode() == ISD::SRL &&
2492      N0.getOperand(1).getOpcode() == ISD::Constant) {
2493    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
2494    if (c1 < VT.getSizeInBits()) {
2495      uint64_t c2 = N1C->getZExtValue();
2496      SDValue HiBitsMask =
2497        DAG.getConstant(APInt::getHighBitsSet(VT.getSizeInBits(),
2498                                              VT.getSizeInBits() - c1),
2499                        VT);
2500      SDValue Mask = DAG.getNode(ISD::AND, N0.getDebugLoc(), VT,
2501                                 N0.getOperand(0),
2502                                 HiBitsMask);
2503      if (c2 > c1)
2504        return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, Mask,
2505                           DAG.getConstant(c2-c1, N1.getValueType()));
2506      else
2507        return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, Mask,
2508                           DAG.getConstant(c1-c2, N1.getValueType()));
2509    }
2510  }
2511  // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
2512  if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
2513    SDValue HiBitsMask =
2514      DAG.getConstant(APInt::getHighBitsSet(VT.getSizeInBits(),
2515                                            VT.getSizeInBits() -
2516                                              N1C->getZExtValue()),
2517                      VT);
2518    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
2519                       HiBitsMask);
2520  }
2521
2522  return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue();
2523}
2524
2525SDValue DAGCombiner::visitSRA(SDNode *N) {
2526  SDValue N0 = N->getOperand(0);
2527  SDValue N1 = N->getOperand(1);
2528  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2529  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2530  EVT VT = N0.getValueType();
2531
2532  // fold (sra c1, c2) -> c1>>c2
2533  if (N0C && N1C)
2534    return DAG.FoldConstantArithmetic(ISD::SRA, VT, N0C, N1C);
2535  // fold (sra 0, x) -> 0
2536  if (N0C && N0C->isNullValue())
2537    return N0;
2538  // fold (sra -1, x) -> -1
2539  if (N0C && N0C->isAllOnesValue())
2540    return N0;
2541  // fold (sra x, c >= size(x)) -> undef
2542  if (N1C && N1C->getZExtValue() >= VT.getSizeInBits())
2543    return DAG.getUNDEF(VT);
2544  // fold (sra x, 0) -> x
2545  if (N1C && N1C->isNullValue())
2546    return N0;
2547  // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
2548  // sext_inreg.
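  // e.g. on i32, (sra (shl x, 24), 24) -> (sign_extend_inreg x, i8).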
2549  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
2550    unsigned LowBits = VT.getSizeInBits() - (unsigned)N1C->getZExtValue();
2551    EVT EVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
2552    if ((!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, EVT)))
2553      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
2554                         N0.getOperand(0), DAG.getValueType(EVT));
2555  }
2556
2557  // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
2558  if (N1C && N0.getOpcode() == ISD::SRA) {
2559    if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2560      unsigned Sum = N1C->getZExtValue() + C1->getZExtValue();
2561      if (Sum >= VT.getSizeInBits()) Sum = VT.getSizeInBits()-1;
2562      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0.getOperand(0),
2563                         DAG.getConstant(Sum, N1C->getValueType(0)));
2564    }
2565  }
2566
2567  // fold (sra (shl X, m), (sub result_size, n))
2568  // -> (sign_extend (trunc (srl X, (sub (sub result_size, n), m)))) for
2569  // result_size - n != m.
2570  // If truncate is free for the target sext(shl) is likely to result in better
2571  // code.
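  // e.g. on i32 with m = 8 and n = 8:
  //   (sra (shl X, 8), 24) -> (sign_extend (truncate:i8 (srl X, 16))).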
2572  if (N0.getOpcode() == ISD::SHL) {
2573    // Get the two constants of the shifts, CN0 = m, CN = n.
2574    const ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2575    if (N01C && N1C) {
2576      // Determine what the truncate's result bitsize and type would be.
2577      unsigned VTValSize = VT.getSizeInBits();
2578      EVT TruncVT =
2579        EVT::getIntegerVT(*DAG.getContext(), VTValSize - N1C->getZExtValue());
2580      // Determine the residual right-shift amount.
2581      signed ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
2582
2583      // If the shift is not a no-op (in which case this should be just a sign
2584      // extend already), the truncated-to type is legal, sign_extend is legal
2585      // on that type, and the truncate to that type is both legal and free,
2586      // perform the transform.
2587      if ((ShiftAmt > 0) &&
2588          TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
2589          TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
2590          TLI.isTruncateFree(VT, TruncVT)) {
2591
2592          SDValue Amt = DAG.getConstant(ShiftAmt, getShiftAmountTy());
2593          SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT,
2594                                      N0.getOperand(0), Amt);
2595          SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), TruncVT,
2596                                      Shift);
2597          return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(),
2598                             N->getValueType(0), Trunc);
2599      }
2600    }
2601  }
2602
2603  // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
2604  if (N1.getOpcode() == ISD::TRUNCATE &&
2605      N1.getOperand(0).getOpcode() == ISD::AND &&
2606      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
2607    SDValue N101 = N1.getOperand(0).getOperand(1);
2608    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
2609      EVT TruncVT = N1.getValueType();
2610      SDValue N100 = N1.getOperand(0).getOperand(0);
2611      APInt TruncC = N101C->getAPIntValue();
2612      TruncC.trunc(TruncVT.getSizeInBits());
2613      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
2614                         DAG.getNode(ISD::AND, N->getDebugLoc(),
2615                                     TruncVT,
2616                                     DAG.getNode(ISD::TRUNCATE,
2617                                                 N->getDebugLoc(),
2618                                                 TruncVT, N100),
2619                                     DAG.getConstant(TruncC, TruncVT)));
2620    }
2621  }
2622
2623  // Simplify, based on bits shifted out of the LHS.
2624  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
2625    return SDValue(N, 0);
2626
2627
2628  // If the sign bit is known to be zero, switch this to a SRL.
2629  if (DAG.SignBitIsZero(N0))
2630    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, N1);
2631
2632  return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue();
2633}
2634
2635SDValue DAGCombiner::visitSRL(SDNode *N) {
2636  SDValue N0 = N->getOperand(0);
2637  SDValue N1 = N->getOperand(1);
2638  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2639  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2640  EVT VT = N0.getValueType();
2641  unsigned OpSizeInBits = VT.getSizeInBits();
2642
2643  // fold (srl c1, c2) -> c1 >>u c2
2644  if (N0C && N1C)
2645    return DAG.FoldConstantArithmetic(ISD::SRL, VT, N0C, N1C);
2646  // fold (srl 0, x) -> 0
2647  if (N0C && N0C->isNullValue())
2648    return N0;
2649  // fold (srl x, c >= size(x)) -> undef
2650  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
2651    return DAG.getUNDEF(VT);
2652  // fold (srl x, 0) -> x
2653  if (N1C && N1C->isNullValue())
2654    return N0;
2655  // if (srl x, c) is known to be zero, return 0
2656  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
2657                                   APInt::getAllOnesValue(OpSizeInBits)))
2658    return DAG.getConstant(0, VT);
2659
2660  // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
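  // E.g. on i32: (srl (srl x, 3), 4) -> (srl x, 7), while
  // (srl (srl x, 20), 20) -> 0 because more than 32 bits would be shifted out.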
2661  if (N1C && N0.getOpcode() == ISD::SRL &&
2662      N0.getOperand(1).getOpcode() == ISD::Constant) {
2663    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
2664    uint64_t c2 = N1C->getZExtValue();
2665    if (c1 + c2 > OpSizeInBits)
2666      return DAG.getConstant(0, VT);
2667    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
2668                       DAG.getConstant(c1 + c2, N1.getValueType()));
2669  }
2670
2671  // fold (srl (anyextend x), c) -> (anyextend (srl x, c))
2672  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
2673    // Shifting in all undef bits?
2674    EVT SmallVT = N0.getOperand(0).getValueType();
2675    if (N1C->getZExtValue() >= SmallVT.getSizeInBits())
2676      return DAG.getUNDEF(VT);
2677
2678    SDValue SmallShift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), SmallVT,
2679                                     N0.getOperand(0), N1);
2680    AddToWorkList(SmallShift.getNode());
2681    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, SmallShift);
2682  }
2683
2684  // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
2685  // bit, which is unmodified by sra.
2686  if (N1C && N1C->getZExtValue() + 1 == VT.getSizeInBits()) {
2687    if (N0.getOpcode() == ISD::SRA)
2688      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0), N1);
2689  }
2690
2691  // fold (srl (ctlz x), "5") -> (xor x, 1) iff only bit 0 of x can be set.
2692  if (N1C && N0.getOpcode() == ISD::CTLZ &&
2693      N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
2694    APInt KnownZero, KnownOne;
2695    APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits());
2696    DAG.ComputeMaskedBits(N0.getOperand(0), Mask, KnownZero, KnownOne);
2697
2698    // If any of the input bits are KnownOne, then the input couldn't be all
2699    // zeros, thus the result of the srl will always be zero.
2700    if (KnownOne.getBoolValue()) return DAG.getConstant(0, VT);
2701
2702    // If all of the bits input to the ctlz node are known to be zero, then
2703    // the result of the ctlz is "32" and the result of the shift is one.
2704    APInt UnknownBits = ~KnownZero & Mask;
2705    if (UnknownBits == 0) return DAG.getConstant(1, VT);
2706
2707    // Otherwise, check to see if there is exactly one bit input to the ctlz.
2708    if ((UnknownBits & (UnknownBits - 1)) == 0) {
2709      // Okay, we know that only the single bit specified by UnknownBits
2710      // could be set on input to the CTLZ node. If this bit is set, the SRL
2711      // will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
2712      // to an SRL/XOR pair, which is likely to simplify more.
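      // E.g. if only bit 3 of the input can be nonzero, the ctlz/srl pair
      // computes "input == 0", which is equivalent to ((input >> 3) ^ 1).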
2713      unsigned ShAmt = UnknownBits.countTrailingZeros();
2714      SDValue Op = N0.getOperand(0);
2715
2716      if (ShAmt) {
2717        Op = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT, Op,
2718                         DAG.getConstant(ShAmt, getShiftAmountTy()));
2719        AddToWorkList(Op.getNode());
2720      }
2721
2722      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
2723                         Op, DAG.getConstant(1, VT));
2724    }
2725  }
2726
2727  // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
2728  if (N1.getOpcode() == ISD::TRUNCATE &&
2729      N1.getOperand(0).getOpcode() == ISD::AND &&
2730      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
2731    SDValue N101 = N1.getOperand(0).getOperand(1);
2732    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
2733      EVT TruncVT = N1.getValueType();
2734      SDValue N100 = N1.getOperand(0).getOperand(0);
2735      APInt TruncC = N101C->getAPIntValue();
2736      TruncC.trunc(TruncVT.getSizeInBits());
2737      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
2738                         DAG.getNode(ISD::AND, N->getDebugLoc(),
2739                                     TruncVT,
2740                                     DAG.getNode(ISD::TRUNCATE,
2741                                                 N->getDebugLoc(),
2742                                                 TruncVT, N100),
2743                                     DAG.getConstant(TruncC, TruncVT)));
2744    }
2745  }
2746
2747  // fold operands of srl based on knowledge that the low bits are not
2748  // demanded.
2749  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
2750    return SDValue(N, 0);
2751
2752  return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue();
2753}
2754
2755SDValue DAGCombiner::visitCTLZ(SDNode *N) {
2756  SDValue N0 = N->getOperand(0);
2757  EVT VT = N->getValueType(0);
2758
2759  // fold (ctlz c1) -> c2
2760  if (isa<ConstantSDNode>(N0))
2761    return DAG.getNode(ISD::CTLZ, N->getDebugLoc(), VT, N0);
2762  return SDValue();
2763}
2764
2765SDValue DAGCombiner::visitCTTZ(SDNode *N) {
2766  SDValue N0 = N->getOperand(0);
2767  EVT VT = N->getValueType(0);
2768
2769  // fold (cttz c1) -> c2
2770  if (isa<ConstantSDNode>(N0))
2771    return DAG.getNode(ISD::CTTZ, N->getDebugLoc(), VT, N0);
2772  return SDValue();
2773}
2774
2775SDValue DAGCombiner::visitCTPOP(SDNode *N) {
2776  SDValue N0 = N->getOperand(0);
2777  EVT VT = N->getValueType(0);
2778
2779  // fold (ctpop c1) -> c2
2780  if (isa<ConstantSDNode>(N0))
2781    return DAG.getNode(ISD::CTPOP, N->getDebugLoc(), VT, N0);
2782  return SDValue();
2783}
2784
2785SDValue DAGCombiner::visitSELECT(SDNode *N) {
2786  SDValue N0 = N->getOperand(0);
2787  SDValue N1 = N->getOperand(1);
2788  SDValue N2 = N->getOperand(2);
2789  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2790  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2791  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2792  EVT VT = N->getValueType(0);
2793  EVT VT0 = N0.getValueType();
2794
2795  // fold (select C, X, X) -> X
2796  if (N1 == N2)
2797    return N1;
2798  // fold (select true, X, Y) -> X
2799  if (N0C && !N0C->isNullValue())
2800    return N1;
2801  // fold (select false, X, Y) -> Y
2802  if (N0C && N0C->isNullValue())
2803    return N2;
2804  // fold (select C, 1, X) -> (or C, X)
2805  if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
2806    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
2807  // fold (select C, 0, 1) -> (xor C, 1)
2808  if (VT.isInteger() &&
2809      (VT0 == MVT::i1 ||
2810       (VT0.isInteger() &&
2811        TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent)) &&
2812      N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
2813    SDValue XORNode;
2814    if (VT == VT0)
2815      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT0,
2816                         N0, DAG.getConstant(1, VT0));
2817    XORNode = DAG.getNode(ISD::XOR, N0.getDebugLoc(), VT0,
2818                          N0, DAG.getConstant(1, VT0));
2819    AddToWorkList(XORNode.getNode());
2820    if (VT.bitsGT(VT0))
2821      return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, XORNode);
2822    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, XORNode);
2823  }
2824  // fold (select C, 0, X) -> (and (not C), X)
2825  if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) {
2826    SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
2827    AddToWorkList(NOTNode.getNode());
2828    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, NOTNode, N2);
2829  }
2830  // fold (select C, X, 1) -> (or (not C), X)
2831  if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) {
2832    SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
2833    AddToWorkList(NOTNode.getNode());
2834    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, NOTNode, N1);
2835  }
2836  // fold (select C, X, 0) -> (and C, X)
2837  if (VT == MVT::i1 && N2C && N2C->isNullValue())
2838    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);
2839  // fold (select X, X, Y) -> (or X, Y)
2840  // fold (select X, 1, Y) -> (or X, Y)
2841  if (VT == MVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1)))
2842    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
2843  // fold (select X, Y, X) -> (and X, Y)
2844  // fold (select X, Y, 0) -> (and X, Y)
2845  if (VT == MVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0)))
2846    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);
2847
2848  // If we can fold this based on the true/false value, do so.
2849  if (SimplifySelectOps(N, N1, N2))
2850    return SDValue(N, 0);  // Don't revisit N.
2851
2852  // fold selects based on a setcc into other things, such as min/max/abs
2853  if (N0.getOpcode() == ISD::SETCC) {
2854    // FIXME:
2855    // Check against MVT::Other for SELECT_CC, which is a workaround for targets
2856    // having to say they don't support SELECT_CC on every type the DAG knows
2857    // about, since there is no way to mark an opcode illegal at all value types
2858    if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other) &&
2859        TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))
2860      return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT,
2861                         N0.getOperand(0), N0.getOperand(1),
2862                         N1, N2, N0.getOperand(2));
2863    return SimplifySelect(N->getDebugLoc(), N0, N1, N2);
2864  }
2865
2866  return SDValue();
2867}
2868
2869SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
2870  SDValue N0 = N->getOperand(0);
2871  SDValue N1 = N->getOperand(1);
2872  SDValue N2 = N->getOperand(2);
2873  SDValue N3 = N->getOperand(3);
2874  SDValue N4 = N->getOperand(4);
2875  ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
2876
2877  // fold select_cc lhs, rhs, x, x, cc -> x
2878  if (N2 == N3)
2879    return N2;
2880
2881  // Determine if the condition we're dealing with is constant
2882  SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()),
2883                              N0, N1, CC, N->getDebugLoc(), false);
2884  if (SCC.getNode()) AddToWorkList(SCC.getNode());
2885
2886  if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
2887    if (!SCCC->isNullValue())
2888      return N2;    // cond always true -> true val
2889    else
2890      return N3;    // cond always false -> false val
2891  }
2892
2893  // Fold to a simpler select_cc
2894  if (SCC.getNode() && SCC.getOpcode() == ISD::SETCC)
2895    return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N2.getValueType(),
2896                       SCC.getOperand(0), SCC.getOperand(1), N2, N3,
2897                       SCC.getOperand(2));
2898
2899  // If we can fold this based on the true/false value, do so.
2900  if (SimplifySelectOps(N, N2, N3))
2901    return SDValue(N, 0);  // Don't revisit N.
2902
2903  // fold select_cc into other things, such as min/max/abs
2904  return SimplifySelectCC(N->getDebugLoc(), N0, N1, N2, N3, CC);
2905}
2906
2907SDValue DAGCombiner::visitSETCC(SDNode *N) {
2908  return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
2909                       cast<CondCodeSDNode>(N->getOperand(2))->get(),
2910                       N->getDebugLoc());
2911}
2912
2913// ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this:
2914// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
2915// transformation. Returns true if extension is possible and the
2916// above-mentioned transformation is profitable.
2917static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
2918                                    unsigned ExtOpc,
2919                                    SmallVector<SDNode*, 4> &ExtendNodes,
2920                                    const TargetLowering &TLI) {
2921  bool HasCopyToRegUses = false;
2922  bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
2923  for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
2924                            UE = N0.getNode()->use_end();
2925       UI != UE; ++UI) {
2926    SDNode *User = *UI;
2927    if (User == N)
2928      continue;
2929    if (UI.getUse().getResNo() != N0.getResNo())
2930      continue;
2931    // FIXME: Only extend SETCC N, N and SETCC N, c for now.
2932    if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
2933      ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
2934      if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
2935        // Sign bits will be lost after a zext.
2936        return false;
2937      bool Add = false;
2938      for (unsigned i = 0; i != 2; ++i) {
2939        SDValue UseOp = User->getOperand(i);
2940        if (UseOp == N0)
2941          continue;
2942        if (!isa<ConstantSDNode>(UseOp))
2943          return false;
2944        Add = true;
2945      }
2946      if (Add)
2947        ExtendNodes.push_back(User);
2948      continue;
2949    }
2950    // If truncates aren't free and there are users we can't
2951    // extend, it isn't worthwhile.
2952    if (!isTruncFree)
2953      return false;
2954    // Remember if this value is live-out.
2955    if (User->getOpcode() == ISD::CopyToReg)
2956      HasCopyToRegUses = true;
2957  }
2958
2959  if (HasCopyToRegUses) {
2960    bool BothLiveOut = false;
2961    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
2962         UI != UE; ++UI) {
2963      SDUse &Use = UI.getUse();
2964      if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
2965        BothLiveOut = true;
2966        break;
2967      }
2968    }
2969    if (BothLiveOut)
2970      // Both unextended and extended values are live out. There had better be
2971      // a good reason for the transformation.
2972      return ExtendNodes.size();
2973  }
2974  return true;
2975}
2976
2977SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
2978  SDValue N0 = N->getOperand(0);
2979  EVT VT = N->getValueType(0);
2980
2981  // fold (sext c1) -> c1
2982  if (isa<ConstantSDNode>(N0))
2983    return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N0);
2984
2985  // fold (sext (sext x)) -> (sext x)
2986  // fold (sext (aext x)) -> (sext x)
2987  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
2988    return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT,
2989                       N0.getOperand(0));
2990
2991  if (N0.getOpcode() == ISD::TRUNCATE) {
2992    // fold (sext (truncate (load x))) -> (sext (smaller load x))
2993    // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
2994    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
2995    if (NarrowLoad.getNode()) {
2996      if (NarrowLoad.getNode() != N0.getNode())
2997        CombineTo(N0.getNode(), NarrowLoad);
2998      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
2999    }
3000
3001    // See if the value being truncated is already sign extended.  If so, just
3002    // eliminate the trunc/sext pair.
3003    SDValue Op = N0.getOperand(0);
3004    unsigned OpBits   = Op.getValueType().getSizeInBits();
3005    unsigned MidBits  = N0.getValueType().getSizeInBits();
3006    unsigned DestBits = VT.getSizeInBits();
3007    unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
3008
3009    if (OpBits == DestBits) {
3010      // Op is i32, Mid is i8, and Dest is i32.  If Op has more than 24 sign
3011      // bits, it is already sign extended and can be used directly.
3012      if (NumSignBits > DestBits-MidBits)
3013        return Op;
3014    } else if (OpBits < DestBits) {
3015      // Op is i32, Mid is i8, and Dest is i64.  If Op has more than 24 sign
3016      // bits, just sext from i32.
3017      if (NumSignBits > OpBits-MidBits)
3018        return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, Op);
3019    } else {
3020      // Op is i64, Mid is i8, and Dest is i32.  If Op has more than 56 sign
3021      // bits, just truncate to i32.
3022      if (NumSignBits > OpBits-MidBits)
3023        return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);
3024    }
3025
3026    // fold (sext (truncate x)) -> (sextinreg x).
3027    if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
3028                                                 N0.getValueType())) {
3029      if (Op.getValueType().bitsLT(VT))
3030        Op = DAG.getNode(ISD::ANY_EXTEND, N0.getDebugLoc(), VT, Op);
3031      else if (Op.getValueType().bitsGT(VT))
3032        Op = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), VT, Op);
3033      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, Op,
3034                         DAG.getValueType(N0.getValueType()));
3035    }
3036  }
3037
3038  // fold (sext (load x)) -> (sext (truncate (sextload x)))
3039  if (ISD::isNON_EXTLoad(N0.getNode()) &&
3040      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3041       TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) {
3042    bool DoXform = true;
3043    SmallVector<SDNode*, 4> SetCCs;
3044    if (!N0.hasOneUse())
3045      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI);
3046    if (DoXform) {
3047      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3048      SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
3049                                       LN0->getChain(),
3050                                       LN0->getBasePtr(), LN0->getSrcValue(),
3051                                       LN0->getSrcValueOffset(),
3052                                       N0.getValueType(),
3053                                       LN0->isVolatile(), LN0->getAlignment());
3054      CombineTo(N, ExtLoad);
3055      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3056                                  N0.getValueType(), ExtLoad);
3057      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
3058
3059      // Extend SetCC uses if necessary.
3060      for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
3061        SDNode *SetCC = SetCCs[i];
3062        SmallVector<SDValue, 4> Ops;
3063
3064        for (unsigned j = 0; j != 2; ++j) {
3065          SDValue SOp = SetCC->getOperand(j);
3066          if (SOp == Trunc)
3067            Ops.push_back(ExtLoad);
3068          else
3069            Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND,
3070                                      N->getDebugLoc(), VT, SOp));
3071        }
3072
3073        Ops.push_back(SetCC->getOperand(2));
3074        CombineTo(SetCC, DAG.getNode(ISD::SETCC, N->getDebugLoc(),
3075                                     SetCC->getValueType(0),
3076                                     &Ops[0], Ops.size()));
3077      }
3078
3079      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3080    }
3081  }
3082
3083  // fold (sext (sextload x)) -> (sext (truncate (sextload x)))
3084  // fold (sext ( extload x)) -> (sext (truncate (sextload x)))
3085  if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
3086      ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
3087    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3088    EVT MemVT = LN0->getMemoryVT();
3089    if ((!LegalOperations && !LN0->isVolatile()) ||
3090        TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) {
3091      SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
3092                                       LN0->getChain(),
3093                                       LN0->getBasePtr(), LN0->getSrcValue(),
3094                                       LN0->getSrcValueOffset(), MemVT,
3095                                       LN0->isVolatile(), LN0->getAlignment());
3096      CombineTo(N, ExtLoad);
3097      CombineTo(N0.getNode(),
3098                DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3099                            N0.getValueType(), ExtLoad),
3100                ExtLoad.getValue(1));
3101      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3102    }
3103  }
3104
3105  if (N0.getOpcode() == ISD::SETCC) {
3106    // sext(setcc) -> sext_in_reg(vsetcc) for vectors.
3107    if (VT.isVector() &&
3108        // We know that the # elements of the result is the same as the
3109        // # elements of the compare (and the # elements of the compare result
3110        // for that matter).  Check to see that they are the same size.  If so,
3111        // we know that the element size of the sext'd result matches the
3112        // element size of the compare operands.
3113        VT.getSizeInBits() == N0.getOperand(0).getValueType().getSizeInBits() &&
3114
3115        // Only do this before legalize for now.
3116        !LegalOperations) {
3117      return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
3118                           N0.getOperand(1),
3119                           cast<CondCodeSDNode>(N0.getOperand(2))->get());
3120    }
3121
3122    // sext(setcc x, y, cc) -> (select_cc x, y, -1, 0, cc)
3123    SDValue NegOne =
3124      DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3125    SDValue SCC =
3126      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
3127                       NegOne, DAG.getConstant(0, VT),
3128                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
3129    if (SCC.getNode()) return SCC;
3130  }
3131
3132
3133
3134  // fold (sext x) -> (zext x) if the sign bit is known zero.
3135  if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
3136      DAG.SignBitIsZero(N0))
3137    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0);
3138
3139  return SDValue();
3140}
3141
3142SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
3143  SDValue N0 = N->getOperand(0);
3144  EVT VT = N->getValueType(0);
3145
3146  // fold (zext c1) -> c1
3147  if (isa<ConstantSDNode>(N0))
3148    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0);
3149  // fold (zext (zext x)) -> (zext x)
3150  // fold (zext (aext x)) -> (zext x)
3151  if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
3152    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT,
3153                       N0.getOperand(0));
3154
3155  // fold (zext (truncate (load x))) -> (zext (smaller load x))
3156  // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n)))
3157  if (N0.getOpcode() == ISD::TRUNCATE) {
3158    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
3159    if (NarrowLoad.getNode()) {
3160      if (NarrowLoad.getNode() != N0.getNode())
3161        CombineTo(N0.getNode(), NarrowLoad);
3162      return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, NarrowLoad);
3163    }
3164  }
3165
3166  // fold (zext (truncate x)) -> (and x, mask)
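  // E.g. (zext:i32 (trunc:i8 x:i32)) -> (and x, 255).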
3167  if (N0.getOpcode() == ISD::TRUNCATE &&
3168      (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
3169    SDValue Op = N0.getOperand(0);
3170    if (Op.getValueType().bitsLT(VT)) {
3171      Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op);
3172    } else if (Op.getValueType().bitsGT(VT)) {
3173      Op = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);
3174    }
3175    return DAG.getZeroExtendInReg(Op, N->getDebugLoc(), N0.getValueType());
3176  }
3177
3178  // Fold (zext (and (trunc x), cst)) -> (and x, cst),
3179  // if either of the casts is not free.
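  // E.g. (zext:i64 (and (trunc:i32 x:i64), 0xffff)) -> (and x, 0xffff)
  // when truncating i64->i32 or zero extending i32->i64 is not free.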
3180  if (N0.getOpcode() == ISD::AND &&
3181      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
3182      N0.getOperand(1).getOpcode() == ISD::Constant &&
3183      (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
3184                           N0.getValueType()) ||
3185       !TLI.isZExtFree(N0.getValueType(), VT))) {
3186    SDValue X = N0.getOperand(0).getOperand(0);
3187    if (X.getValueType().bitsLT(VT)) {
3188      X = DAG.getNode(ISD::ANY_EXTEND, X.getDebugLoc(), VT, X);
3189    } else if (X.getValueType().bitsGT(VT)) {
3190      X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
3191    }
3192    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
3193    Mask.zext(VT.getSizeInBits());
3194    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3195                       X, DAG.getConstant(Mask, VT));
3196  }
3197
3198  // fold (zext (load x)) -> (zext (truncate (zextload x)))
3199  if (ISD::isNON_EXTLoad(N0.getNode()) &&
3200      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3201       TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
3202    bool DoXform = true;
3203    SmallVector<SDNode*, 4> SetCCs;
3204    if (!N0.hasOneUse())
3205      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI);
3206    if (DoXform) {
3207      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3208      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
3209                                       LN0->getChain(),
3210                                       LN0->getBasePtr(), LN0->getSrcValue(),
3211                                       LN0->getSrcValueOffset(),
3212                                       N0.getValueType(),
3213                                       LN0->isVolatile(), LN0->getAlignment());
3214      CombineTo(N, ExtLoad);
3215      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3216                                  N0.getValueType(), ExtLoad);
3217      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
3218
3219      // Extend SetCC uses if necessary.
3220      for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
3221        SDNode *SetCC = SetCCs[i];
3222        SmallVector<SDValue, 4> Ops;
3223
3224        for (unsigned j = 0; j != 2; ++j) {
3225          SDValue SOp = SetCC->getOperand(j);
3226          if (SOp == Trunc)
3227            Ops.push_back(ExtLoad);
3228          else
3229            Ops.push_back(DAG.getNode(ISD::ZERO_EXTEND,
3230                                      N->getDebugLoc(), VT, SOp));
3231        }
3232
3233        Ops.push_back(SetCC->getOperand(2));
3234        CombineTo(SetCC, DAG.getNode(ISD::SETCC, N->getDebugLoc(),
3235                                     SetCC->getValueType(0),
3236                                     &Ops[0], Ops.size()));
3237      }
3238
3239      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3240    }
3241  }
3242
3243  // fold (zext (zextload x)) -> (zext (truncate (zextload x)))
3244  // fold (zext ( extload x)) -> (zext (truncate (zextload x)))
3245  if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
3246      ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
3247    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3248    EVT MemVT = LN0->getMemoryVT();
3249    if ((!LegalOperations && !LN0->isVolatile()) ||
3250        TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) {
3251      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
3252                                       LN0->getChain(),
3253                                       LN0->getBasePtr(), LN0->getSrcValue(),
3254                                       LN0->getSrcValueOffset(), MemVT,
3255                                       LN0->isVolatile(), LN0->getAlignment());
3256      CombineTo(N, ExtLoad);
3257      CombineTo(N0.getNode(),
3258                DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), N0.getValueType(),
3259                            ExtLoad),
3260                ExtLoad.getValue(1));
3261      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3262    }
3263  }
3264
3265  // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
3266  if (N0.getOpcode() == ISD::SETCC) {
3267    SDValue SCC =
3268      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
3269                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
3270                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
3271    if (SCC.getNode()) return SCC;
3272  }
3273
3274  return SDValue();
3275}
3276
3277SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
3278  SDValue N0 = N->getOperand(0);
3279  EVT VT = N->getValueType(0);
3280
3281  // fold (aext c1) -> c1
3282  if (isa<ConstantSDNode>(N0))
3283    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, N0);
3284  // fold (aext (aext x)) -> (aext x)
3285  // fold (aext (zext x)) -> (zext x)
3286  // fold (aext (sext x)) -> (sext x)
3287  if (N0.getOpcode() == ISD::ANY_EXTEND  ||
3288      N0.getOpcode() == ISD::ZERO_EXTEND ||
3289      N0.getOpcode() == ISD::SIGN_EXTEND)
3290    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, N0.getOperand(0));
3291
3292  // fold (aext (truncate (load x))) -> (aext (smaller load x))
3293  // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
3294  if (N0.getOpcode() == ISD::TRUNCATE) {
3295    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
3296    if (NarrowLoad.getNode()) {
3297      if (NarrowLoad.getNode() != N0.getNode())
3298        CombineTo(N0.getNode(), NarrowLoad);
3299      return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, NarrowLoad);
3300    }
3301  }
3302
3303  // fold (aext (truncate x))
3304  if (N0.getOpcode() == ISD::TRUNCATE) {
3305    SDValue TruncOp = N0.getOperand(0);
3306    if (TruncOp.getValueType() == VT)
3307      return TruncOp; // x iff x size == zext size.
3308    if (TruncOp.getValueType().bitsGT(VT))
3309      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, TruncOp);
3310    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, TruncOp);
3311  }
3312
3313  // Fold (aext (and (trunc x), cst)) -> (and x, cst)
3314  // if the trunc is not free.
3315  if (N0.getOpcode() == ISD::AND &&
3316      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
3317      N0.getOperand(1).getOpcode() == ISD::Constant &&
3318      !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
3319                          N0.getValueType())) {
3320    SDValue X = N0.getOperand(0).getOperand(0);
3321    if (X.getValueType().bitsLT(VT)) {
3322      X = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, X);
3323    } else if (X.getValueType().bitsGT(VT)) {
3324      X = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, X);
3325    }
3326    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
3327    Mask.zext(VT.getSizeInBits());
3328    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3329                       X, DAG.getConstant(Mask, VT));
3330  }
3331
3332  // fold (aext (load x)) -> (aext (truncate (extload x)))
3333  if (ISD::isNON_EXTLoad(N0.getNode()) &&
3334      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3335       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
3336    bool DoXform = true;
3337    SmallVector<SDNode*, 4> SetCCs;
3338    if (!N0.hasOneUse())
3339      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
3340    if (DoXform) {
3341      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3342      SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
3343                                       LN0->getChain(),
3344                                       LN0->getBasePtr(), LN0->getSrcValue(),
3345                                       LN0->getSrcValueOffset(),
3346                                       N0.getValueType(),
3347                                       LN0->isVolatile(), LN0->getAlignment());
3348      CombineTo(N, ExtLoad);
3349      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3350                                  N0.getValueType(), ExtLoad);
3351      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
3352
3353      // Extend SetCC uses if necessary.
3354      for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
3355        SDNode *SetCC = SetCCs[i];
3356        SmallVector<SDValue, 4> Ops;
3357
3358        for (unsigned j = 0; j != 2; ++j) {
3359          SDValue SOp = SetCC->getOperand(j);
3360          if (SOp == Trunc)
3361            Ops.push_back(ExtLoad);
3362          else
3363            Ops.push_back(DAG.getNode(ISD::ANY_EXTEND,
3364                                      N->getDebugLoc(), VT, SOp));
3365        }
3366
3367        Ops.push_back(SetCC->getOperand(2));
3368        CombineTo(SetCC, DAG.getNode(ISD::SETCC, N->getDebugLoc(),
3369                                     SetCC->getValueType(0),
3370                                     &Ops[0], Ops.size()));
3371      }
3372
3373      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3374    }
3375  }
3376
3377  // fold (aext (zextload x)) -> (aext (truncate (zextload x)))
3378  // fold (aext (sextload x)) -> (aext (truncate (sextload x)))
3379  // fold (aext ( extload x)) -> (aext (truncate (extload  x)))
3380  if (N0.getOpcode() == ISD::LOAD &&
3381      !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
3382      N0.hasOneUse()) {
3383    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3384    EVT MemVT = LN0->getMemoryVT();
3385    SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), N->getDebugLoc(),
3386                                     VT, LN0->getChain(), LN0->getBasePtr(),
3387                                     LN0->getSrcValue(),
3388                                     LN0->getSrcValueOffset(), MemVT,
3389                                     LN0->isVolatile(), LN0->getAlignment());
3390    CombineTo(N, ExtLoad);
3391    CombineTo(N0.getNode(),
3392              DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3393                          N0.getValueType(), ExtLoad),
3394              ExtLoad.getValue(1));
3395    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3396  }
3397
3398  // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
3399  if (N0.getOpcode() == ISD::SETCC) {
3400    SDValue SCC =
3401      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
3402                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
3403                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
3404    if (SCC.getNode())
3405      return SCC;
3406  }
3407
3408  return SDValue();
3409}
3410
3411/// GetDemandedBits - See if the specified operand can be simplified with the
3412/// knowledge that only the bits specified by Mask are used.  If so, return the
3413/// simpler operand, otherwise return a null SDValue.
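/// For example, with Mask = 0xff, (or (shl x, 8), y) simplifies to y, since
/// (shl x, 8) contributes no bits below bit 8.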
3414SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
3415  switch (V.getOpcode()) {
3416  default: break;
3417  case ISD::OR:
3418  case ISD::XOR:
3419    // If the LHS or RHS don't contribute bits to the or, drop them.
3420    if (DAG.MaskedValueIsZero(V.getOperand(0), Mask))
3421      return V.getOperand(1);
3422    if (DAG.MaskedValueIsZero(V.getOperand(1), Mask))
3423      return V.getOperand(0);
3424    break;
3425  case ISD::SRL:
3426    // Only look at single-use SRLs.
3427    if (!V.getNode()->hasOneUse())
3428      break;
3429    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3430      // See if we can recursively simplify the LHS.
3431      unsigned Amt = RHSC->getZExtValue();
3432
3433      // Watch out for shift count overflow though.
3434      if (Amt >= Mask.getBitWidth()) break;
3435      APInt NewMask = Mask << Amt;
3436      SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask);
3437      if (SimplifyLHS.getNode())
3438        return DAG.getNode(ISD::SRL, V.getDebugLoc(), V.getValueType(),
3439                           SimplifyLHS, V.getOperand(1));
3440    }
3441  }
3442  return SDValue();
3443}
3444
3445/// ReduceLoadWidth - If the result of a wider load is shifted right by N
3446/// bits and then truncated to a narrower type, and if N is a multiple of
3447/// the number of bits of the narrower type, transform it to a narrower load
3448/// from address + N / (number of bits of the new type). If the result is to
3449/// be extended, also fold the extension to form an extending load.
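/// For example, on a little-endian target, (trunc:i8 (srl (load:i32 p), 16))
/// can become (load:i8 (add p, 2)).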
3450SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
3451  unsigned Opc = N->getOpcode();
3452  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
3453  SDValue N0 = N->getOperand(0);
3454  EVT VT = N->getValueType(0);
3455  EVT ExtVT = VT;
3456
3457  // This transformation isn't valid for vector loads.
3458  if (VT.isVector())
3459    return SDValue();
3460
3461  // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
3462  // extending to VT.
3463  if (Opc == ISD::SIGN_EXTEND_INREG) {
3464    ExtType = ISD::SEXTLOAD;
3465    ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
3466    if (LegalOperations && !TLI.isLoadExtLegal(ISD::SEXTLOAD, ExtVT))
3467      return SDValue();
3468  }
3469
3470  unsigned EVTBits = ExtVT.getSizeInBits();
3471  unsigned ShAmt = 0;
3472  if (N0.getOpcode() == ISD::SRL && N0.hasOneUse() && ExtVT.isRound()) {
3473    if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3474      ShAmt = N01->getZExtValue();
3475      // Is the shift amount a multiple of size of VT?
3476      if ((ShAmt & (EVTBits-1)) == 0) {
3477        N0 = N0.getOperand(0);
3478        // Is the load width a multiple of size of VT?
3479        if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
3480          return SDValue();
3481      }
3482    }
3483  }
3484
3485  // Do not generate loads of non-round integer types since these can
3486  // be expensive (and would be wrong if the type is not byte sized).
3487  if (isa<LoadSDNode>(N0) && N0.hasOneUse() && ExtVT.isRound() &&
3488      cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() > EVTBits &&
3489      // Do not change the width of a volatile load.
3490      !cast<LoadSDNode>(N0)->isVolatile()) {
3491    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3492    EVT PtrType = N0.getOperand(1).getValueType();
3493
3494    // For big endian targets, we need to adjust the offset added to the
3495    // pointer so that we load the correct bytes.
3496    if (TLI.isBigEndian()) {
3497      unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
3498      unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
3499      ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
3500    }
3501
3502    uint64_t PtrOff =  ShAmt / 8;
3503    unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
3504    SDValue NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(),
3505                                 PtrType, LN0->getBasePtr(),
3506                                 DAG.getConstant(PtrOff, PtrType));
3507    AddToWorkList(NewPtr.getNode());
3508
3509    SDValue Load = (ExtType == ISD::NON_EXTLOAD)
3510      ? DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
3511                    LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
3512                    LN0->isVolatile(), NewAlign)
3513      : DAG.getExtLoad(ExtType, N0.getDebugLoc(), VT, LN0->getChain(), NewPtr,
3514                       LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
3515                       ExtVT, LN0->isVolatile(), NewAlign);
3516
3517    // Replace the old load's chain with the new load's chain.
3518    WorkListRemover DeadNodes(*this);
3519    DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1),
3520                                  &DeadNodes);
3521
3522    // Return the new loaded value.
3523    return Load;
3524  }
3525
3526  return SDValue();
3527}
3528
3529SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
3530  SDValue N0 = N->getOperand(0);
3531  SDValue N1 = N->getOperand(1);
3532  EVT VT = N->getValueType(0);
3533  EVT EVT = cast<VTSDNode>(N1)->getVT();
3534  unsigned VTBits = VT.getSizeInBits();
3535  unsigned EVTBits = EVT.getSizeInBits();
3536
3537  // fold (sext_in_reg c1) -> c1
3538  if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF)
3539    return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, N0, N1);
3540
3541  // If the input is already sign extended, just drop the extension.
3542  if (DAG.ComputeNumSignBits(N0) >= VT.getSizeInBits()-EVTBits+1)
3543    return N0;
3544
3545  // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
3546  if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
3547      EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) {
3548    return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
3549                       N0.getOperand(0), N1);
3550  }
3551
3552  // fold (sext_in_reg (sext x)) -> (sext x)
3553  // fold (sext_in_reg (aext x)) -> (sext x)
3554  // if x is small enough.
3555  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
3556    SDValue N00 = N0.getOperand(0);
3557    if (N00.getValueType().getSizeInBits() < EVTBits)
3558      return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N00, N1);
3559  }
3560
3561  // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
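  // Only bit EVTBits-1 (the sign bit of the narrow type) has to be known zero
  // for this to hold.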
3562  if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits)))
3563    return DAG.getZeroExtendInReg(N0, N->getDebugLoc(), EVT);
3564
3565  // fold operands of sext_in_reg based on knowledge that the top bits are not
3566  // demanded.
3567  if (SimplifyDemandedBits(SDValue(N, 0)))
3568    return SDValue(N, 0);
3569
3570  // fold (sext_in_reg (load x)) -> (smaller sextload x)
3571  // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
3572  SDValue NarrowLoad = ReduceLoadWidth(N);
3573  if (NarrowLoad.getNode())
3574    return NarrowLoad;
3575
3576  // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
3577  // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
3578  // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
3579  if (N0.getOpcode() == ISD::SRL) {
3580    if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
3581      if (ShAmt->getZExtValue()+EVTBits <= VT.getSizeInBits()) {
3582        // We can turn this into an SRA iff the input to the SRL is already sign
3583        // extended enough.
3584        unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
3585        if (VT.getSizeInBits()-(ShAmt->getZExtValue()+EVTBits) < InSignBits)
3586          return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT,
3587                             N0.getOperand(0), N0.getOperand(1));
3588      }
3589  }
3590
3591  // fold (sext_inreg (extload x)) -> (sextload x)
3592  if (ISD::isEXTLoad(N0.getNode()) &&
3593      ISD::isUNINDEXEDLoad(N0.getNode()) &&
3594      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
3595      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3596       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
3597    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3598    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
3599                                     LN0->getChain(),
3600                                     LN0->getBasePtr(), LN0->getSrcValue(),
3601                                     LN0->getSrcValueOffset(), EVT,
3602                                     LN0->isVolatile(), LN0->getAlignment());
3603    CombineTo(N, ExtLoad);
3604    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
3605    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3606  }
3607  // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
3608  if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
3609      N0.hasOneUse() &&
3610      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
3611      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3612       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
3613    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3614    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
3615                                     LN0->getChain(),
3616                                     LN0->getBasePtr(), LN0->getSrcValue(),
3617                                     LN0->getSrcValueOffset(), EVT,
3618                                     LN0->isVolatile(), LN0->getAlignment());
3619    CombineTo(N, ExtLoad);
3620    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
3621    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3622  }
3623  return SDValue();
3624}
3625
3626SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
3627  SDValue N0 = N->getOperand(0);
3628  EVT VT = N->getValueType(0);
3629
3630  // noop truncate
3631  if (N0.getValueType() == N->getValueType(0))
3632    return N0;
3633  // fold (truncate c1) -> c1
3634  if (isa<ConstantSDNode>(N0))
3635    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0);
3636  // fold (truncate (truncate x)) -> (truncate x)
3637  if (N0.getOpcode() == ISD::TRUNCATE)
3638    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
3639  // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
3640  if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::SIGN_EXTEND||
3641      N0.getOpcode() == ISD::ANY_EXTEND) {
3642    if (N0.getOperand(0).getValueType().bitsLT(VT))
3643      // if the source is smaller than the dest, we still need an extend
3644      return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
3645                         N0.getOperand(0));
3646    else if (N0.getOperand(0).getValueType().bitsGT(VT))
3647      // if the source is larger than the dest, then we just need the truncate
3648      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
3649    else
3650      // if the source and dest are the same type, we can drop both the extend
3651      // and the truncate
3652      return N0.getOperand(0);
3653  }
3654
3655  // See if we can simplify the input to this truncate through knowledge that
3656  // only the low bits are being used.  For example "trunc (or (shl x, 8), y)"
3657  // -> trunc y
3658  SDValue Shorter =
3659    GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
3660                                             VT.getSizeInBits()));
3661  if (Shorter.getNode())
3662    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Shorter);
3663
3664  // fold (truncate (load x)) -> (smaller load x)
3665  // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
3666  return ReduceLoadWidth(N);
3667}
3668
3669static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
3670  SDValue Elt = N->getOperand(i);
3671  if (Elt.getOpcode() != ISD::MERGE_VALUES)
3672    return Elt.getNode();
3673  return Elt.getOperand(Elt.getResNo()).getNode();
3674}
3675
3676/// CombineConsecutiveLoads - build_pair (load, load) -> load
3677/// if load locations are consecutive.
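/// For example, an i64 build_pair of two adjacent i32 loads can sometimes be
/// replaced by a single i64 load from the lower address.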
3678SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
3679  assert(N->getOpcode() == ISD::BUILD_PAIR);
3680
3681  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
3682  LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
3683  if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse())
3684    return SDValue();
3685  EVT LD1VT = LD1->getValueType(0);
3686  const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
3687
3688  if (ISD::isNON_EXTLoad(LD2) &&
3689      LD2->hasOneUse() &&
3690      // If both are volatile this would reduce the number of volatile loads.
3691      // If one is volatile it might be ok, but play conservative and bail out.
3692      !LD1->isVolatile() &&
3693      !LD2->isVolatile() &&
3694      TLI.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1, MFI)) {
3695    unsigned Align = LD1->getAlignment();
3696    unsigned NewAlign = TLI.getTargetData()->
3697      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
3698
3699    if (NewAlign <= Align &&
3700        (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
3701      return DAG.getLoad(VT, N->getDebugLoc(), LD1->getChain(),
3702                         LD1->getBasePtr(), LD1->getSrcValue(),
3703                         LD1->getSrcValueOffset(), false, Align);
3704  }
3705
3706  return SDValue();
3707}
3708
3709SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
3710  SDValue N0 = N->getOperand(0);
3711  EVT VT = N->getValueType(0);
3712
3713  // If the input is a BUILD_VECTOR with all constant elements, fold this now.
3714  // Only do this before legalize, since afterward the target may be depending
3715  // on the bitconvert.
3716  // First check to see if this is all constant.
3717  if (!LegalTypes &&
3718      N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
3719      VT.isVector()) {
3720    bool isSimple = true;
3721    for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i)
3722      if (N0.getOperand(i).getOpcode() != ISD::UNDEF &&
3723          N0.getOperand(i).getOpcode() != ISD::Constant &&
3724          N0.getOperand(i).getOpcode() != ISD::ConstantFP) {
3725        isSimple = false;
3726        break;
3727      }
3728
3729    EVT DestEltVT = N->getValueType(0).getVectorElementType();
3730    assert(!DestEltVT.isVector() &&
3731           "Element type of vector ValueType must not be vector!");
3732    if (isSimple)
3733      return ConstantFoldBIT_CONVERTofBUILD_VECTOR(N0.getNode(), DestEltVT);
3734  }
3735
3736  // If the input is a constant, let getNode fold it.
3737  if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
3738    SDValue Res = DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, N0);
3739    if (Res.getNode() != N) {
3740      if (!LegalOperations ||
3741          TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
3742        return Res;
3743
3744      // Folding it resulted in an illegal node, and it's too late to
3745      // do that. Clean up the old node and forego the transformation.
3746      // Ideally this won't happen very often, because instcombine
3747      // and the earlier dagcombine runs (where illegal nodes are
3748      // permitted) should have folded most of them already.
3749      DAG.DeleteNode(Res.getNode());
3750    }
3751  }
3752
3753  // (conv (conv x, t1), t2) -> (conv x, t2)
3754  if (N0.getOpcode() == ISD::BIT_CONVERT)
3755    return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT,
3756                       N0.getOperand(0));
3757
3758  // fold (conv (load x)) -> (load (conv*)x)
3759  // If the resultant load doesn't need a higher alignment than the original!
3760  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
3761      // Do not change the width of a volatile load.
3762      !cast<LoadSDNode>(N0)->isVolatile() &&
3763      (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) {
3764    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3765    unsigned Align = TLI.getTargetData()->
3766      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
3767    unsigned OrigAlign = LN0->getAlignment();
3768
3769    if (Align <= OrigAlign) {
3770      SDValue Load = DAG.getLoad(VT, N->getDebugLoc(), LN0->getChain(),
3771                                 LN0->getBasePtr(),
3772                                 LN0->getSrcValue(), LN0->getSrcValueOffset(),
3773                                 LN0->isVolatile(), OrigAlign);
3774      AddToWorkList(N);
3775      CombineTo(N0.getNode(),
3776                DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
3777                            N0.getValueType(), Load),
3778                Load.getValue(1));
3779      return Load;
3780    }
3781  }
3782
3783  // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
3784  // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
3785  // This often reduces constant pool loads.
3786  if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) &&
3787      N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) {
3788    SDValue NewConv = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(), VT,
3789                                  N0.getOperand(0));
3790    AddToWorkList(NewConv.getNode());
3791
3792    APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
3793    if (N0.getOpcode() == ISD::FNEG)
3794      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
3795                         NewConv, DAG.getConstant(SignBit, VT));
3796    assert(N0.getOpcode() == ISD::FABS);
3797    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3798                       NewConv, DAG.getConstant(~SignBit, VT));
3799  }
3800
3801  // fold (bitconvert (fcopysign cst, x)) ->
3802  //         (or (and (bitconvert x), sign), (and cst, (not sign)))
3803  // Note that we don't handle (copysign x, cst) because this can always be
3804  // folded to an fneg or fabs.
3805  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
3806      isa<ConstantFPSDNode>(N0.getOperand(0)) &&
3807      VT.isInteger() && !VT.isVector()) {
3808    unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
3809    EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
3810    if (TLI.isTypeLegal(IntXVT) || !LegalTypes) {
3811      SDValue X = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
3812                              IntXVT, N0.getOperand(1));
3813      AddToWorkList(X.getNode());
3814
3815      // If X has a different width than the result/lhs, sext it or truncate it.
3816      unsigned VTWidth = VT.getSizeInBits();
3817      if (OrigXWidth < VTWidth) {
3818        X = DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, X);
3819        AddToWorkList(X.getNode());
3820      } else if (OrigXWidth > VTWidth) {
3821        // To get the sign bit in the right place, we have to shift it right
3822        // before truncating.
3823        X = DAG.getNode(ISD::SRL, X.getDebugLoc(),
3824                        X.getValueType(), X,
3825                        DAG.getConstant(OrigXWidth-VTWidth, X.getValueType()));
3826        AddToWorkList(X.getNode());
3827        X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
3828        AddToWorkList(X.getNode());
3829      }
3830
3831      APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
3832      X = DAG.getNode(ISD::AND, X.getDebugLoc(), VT,
3833                      X, DAG.getConstant(SignBit, VT));
3834      AddToWorkList(X.getNode());
3835
3836      SDValue Cst = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
3837                                VT, N0.getOperand(0));
3838      Cst = DAG.getNode(ISD::AND, Cst.getDebugLoc(), VT,
3839                        Cst, DAG.getConstant(~SignBit, VT));
3840      AddToWorkList(Cst.getNode());
3841
3842      return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, X, Cst);
3843    }
3844  }
3845
3846  // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
3847  if (N0.getOpcode() == ISD::BUILD_PAIR) {
3848    SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
3849    if (CombineLD.getNode())
3850      return CombineLD;
3851  }
3852
3853  return SDValue();
3854}
3855
3856SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
3857  EVT VT = N->getValueType(0);
3858  return CombineConsecutiveLoads(N, VT);
3859}
3860
3861/// ConstantFoldBIT_CONVERTofBUILD_VECTOR - We know that BV is a build_vector
3862/// node with Constant, ConstantFP or Undef operands.  DstEltVT indicates the
3863/// destination element value type.
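/// For example, converting a v2i32 build_vector of constants to v8i8 splits
/// each 32-bit constant into four i8 constants (in endian order), while the
/// v4i32 -> v2i64 direction merges pairs of elements.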
3864SDValue DAGCombiner::
3865ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
3866  EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
3867
3868  // If this is already the right type, we're done.
3869  if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
3870
3871  unsigned SrcBitSize = SrcEltVT.getSizeInBits();
3872  unsigned DstBitSize = DstEltVT.getSizeInBits();
3873
3874  // If this is a conversion of N elements of one type to N elements of another
3875  // type, convert each element.  This handles FP<->INT cases.
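  // E.g. a v4f32 constant vector becomes a v4i32 vector whose elements are
  // the IEEE bit patterns of the original floats.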
3876  if (SrcBitSize == DstBitSize) {
3877    SmallVector<SDValue, 8> Ops;
3878    for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3879      SDValue Op = BV->getOperand(i);
3880      // If the vector element type is not legal, the BUILD_VECTOR operands
3881      // are promoted and implicitly truncated.  Make that explicit here.
3882      if (Op.getValueType() != SrcEltVT)
3883        Op = DAG.getNode(ISD::TRUNCATE, BV->getDebugLoc(), SrcEltVT, Op);
3884      Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, BV->getDebugLoc(),
3885                                DstEltVT, Op));
3886      AddToWorkList(Ops.back().getNode());
3887    }
3888    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
3889                              BV->getValueType(0).getVectorNumElements());
3890    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
3891                       &Ops[0], Ops.size());
3892  }
3893
3894  // Otherwise, we're growing or shrinking the elements.  To avoid having to
3895  // handle annoying details of growing/shrinking FP values, we convert them to
3896  // int first.
3897  if (SrcEltVT.isFloatingPoint()) {
3898    // Convert the input float vector to an int vector where the elements
3899    // are the same size.
3900    assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
3901    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
3902    BV = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, IntVT).getNode();
3903    SrcEltVT = IntVT;
3904  }
3905
3906  // Now we know the input is an integer vector.  If the output is a FP type,
3907  // convert to integer first, then to FP of the right size.
3908  if (DstEltVT.isFloatingPoint()) {
3909    assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
3910    EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
3911    SDNode *Tmp = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, TmpVT).getNode();
3912
3913    // Next, convert to FP elements of the same size.
3914    return ConstantFoldBIT_CONVERTofBUILD_VECTOR(Tmp, DstEltVT);
3915  }
3916
3917  // Okay, we know the src/dst types are both integers of differing sizes.
3918  // Handle growing first.
3919  assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
3920  if (SrcBitSize < DstBitSize) {
3921    unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
3922
3923    SmallVector<SDValue, 8> Ops;
3924    for (unsigned i = 0, e = BV->getNumOperands(); i != e;
3925         i += NumInputsPerOutput) {
3926      bool isLE = TLI.isLittleEndian();
3927      APInt NewBits = APInt(DstBitSize, 0);
3928      bool EltIsUndef = true;
3929      for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
3930        // Shift the previously computed bits over.
3931        NewBits <<= SrcBitSize;
3932        SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
3933        if (Op.getOpcode() == ISD::UNDEF) continue;
3934        EltIsUndef = false;
3935
3936        NewBits |= (APInt(cast<ConstantSDNode>(Op)->getAPIntValue()).
3937                    zextOrTrunc(SrcBitSize).zext(DstBitSize));
3938      }
3939
3940      if (EltIsUndef)
3941        Ops.push_back(DAG.getUNDEF(DstEltVT));
3942      else
3943        Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
3944    }
3945
3946    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
3947    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
3948                       &Ops[0], Ops.size());
3949  }
3950
3951  // Finally, this must be the case where we are shrinking elements: each input
3952  // turns into multiple outputs.
3953  bool isS2V = ISD::isScalarToVector(BV);
3954  unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
3955  EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
3956                            NumOutputsPerInput*BV->getNumOperands());
3957  SmallVector<SDValue, 8> Ops;
3958
3959  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3960    if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
3961      for (unsigned j = 0; j != NumOutputsPerInput; ++j)
3962        Ops.push_back(DAG.getUNDEF(DstEltVT));
3963      continue;
3964    }
3965
3966    APInt OpVal = APInt(cast<ConstantSDNode>(BV->getOperand(i))->
3967                        getAPIntValue()).zextOrTrunc(SrcBitSize);
3968
3969    for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
3970      APInt ThisVal = APInt(OpVal).trunc(DstBitSize);
3971      Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
3972      if (isS2V && i == 0 && j == 0 && APInt(ThisVal).zext(SrcBitSize) == OpVal)
3973        // Simply turn this into a SCALAR_TO_VECTOR of the new type.
3974        return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
3975                           Ops[0]);
3976      OpVal = OpVal.lshr(DstBitSize);
3977    }
3978
3979    // For big endian targets, swap the order of the pieces of each element.
3980    if (TLI.isBigEndian())
3981      std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
3982  }
3983
3984  return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
3985                     &Ops[0], Ops.size());
3986}
3987
3988SDValue DAGCombiner::visitFADD(SDNode *N) {
3989  SDValue N0 = N->getOperand(0);
3990  SDValue N1 = N->getOperand(1);
3991  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
3992  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3993  EVT VT = N->getValueType(0);
3994
3995  // fold vector ops
3996  if (VT.isVector()) {
3997    SDValue FoldedVOp = SimplifyVBinOp(N);
3998    if (FoldedVOp.getNode()) return FoldedVOp;
3999  }
4000
4001  // fold (fadd c1, c2) -> c1+c2
4002  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4003    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N1);
4004  // canonicalize constant to RHS
4005  if (N0CFP && !N1CFP)
4006    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N0);
4007  // fold (fadd A, 0) -> A
4008  if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
4009    return N0;
4010  // fold (fadd A, (fneg B)) -> (fsub A, B)
4011  if (isNegatibleForFree(N1, LegalOperations) == 2)
4012    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0,
4013                       GetNegatedExpression(N1, DAG, LegalOperations));
4014  // fold (fadd (fneg A), B) -> (fsub B, A)
4015  if (isNegatibleForFree(N0, LegalOperations) == 2)
4016    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N1,
4017                       GetNegatedExpression(N0, DAG, LegalOperations));
4018
4019  // If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
4020  if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FADD &&
4021      N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
4022    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0.getOperand(0),
4023                       DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
4024                                   N0.getOperand(1), N1));
4025
4026  return SDValue();
4027}
4028
4029SDValue DAGCombiner::visitFSUB(SDNode *N) {
4030  SDValue N0 = N->getOperand(0);
4031  SDValue N1 = N->getOperand(1);
4032  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4033  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4034  EVT VT = N->getValueType(0);
4035
4036  // fold vector ops
4037  if (VT.isVector()) {
4038    SDValue FoldedVOp = SimplifyVBinOp(N);
4039    if (FoldedVOp.getNode()) return FoldedVOp;
4040  }
4041
4042  // fold (fsub c1, c2) -> c1-c2
4043  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4044    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1);
4045  // fold (fsub A, 0) -> A
4046  if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
4047    return N0;
4048  // fold (fsub 0, B) -> -B
4049  if (UnsafeFPMath && N0CFP && N0CFP->getValueAPF().isZero()) {
4050    if (isNegatibleForFree(N1, LegalOperations))
4051      return GetNegatedExpression(N1, DAG, LegalOperations);
4052    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
4053      return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N1);
4054  }
4055  // fold (fsub A, (fneg B)) -> (fadd A, B)
4056  if (isNegatibleForFree(N1, LegalOperations))
4057    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0,
4058                       GetNegatedExpression(N1, DAG, LegalOperations));
4059
4060  return SDValue();
4061}
4062
4063SDValue DAGCombiner::visitFMUL(SDNode *N) {
4064  SDValue N0 = N->getOperand(0);
4065  SDValue N1 = N->getOperand(1);
4066  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4067  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4068  EVT VT = N->getValueType(0);
4069
4070  // fold vector ops
4071  if (VT.isVector()) {
4072    SDValue FoldedVOp = SimplifyVBinOp(N);
4073    if (FoldedVOp.getNode()) return FoldedVOp;
4074  }
4075
4076  // fold (fmul c1, c2) -> c1*c2
4077  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4078    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0, N1);
4079  // canonicalize constant to RHS
4080  if (N0CFP && !N1CFP)
4081    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N1, N0);
4082  // fold (fmul A, 0) -> 0
4083  if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
4084    return N1;
4085  // fold (fmul A, 0) -> 0, vector edition.
4086  if (UnsafeFPMath && ISD::isBuildVectorAllZeros(N1.getNode()))
4087    return N1;
4088  // fold (fmul X, 2.0) -> (fadd X, X)
4089  if (N1CFP && N1CFP->isExactlyValue(+2.0))
4090    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N0);
4091  // fold (fmul X, -1.0) -> (fneg X)
4092  if (N1CFP && N1CFP->isExactlyValue(-1.0))
4093    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
4094      return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N0);
4095
4096  // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
4097  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) {
4098    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) {
4099      // Both can be negated for free, check to see if at least one is cheaper
4100      // negated.
4101      if (LHSNeg == 2 || RHSNeg == 2)
4102        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
4103                           GetNegatedExpression(N0, DAG, LegalOperations),
4104                           GetNegatedExpression(N1, DAG, LegalOperations));
4105    }
4106  }
4107
4108  // If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
4109  if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FMUL &&
4110      N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
4111    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0.getOperand(0),
4112                       DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
4113                                   N0.getOperand(1), N1));
4114
4115  return SDValue();
4116}
4117
4118SDValue DAGCombiner::visitFDIV(SDNode *N) {
4119  SDValue N0 = N->getOperand(0);
4120  SDValue N1 = N->getOperand(1);
4121  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4122  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4123  EVT VT = N->getValueType(0);
4124
4125  // fold vector ops
4126  if (VT.isVector()) {
4127    SDValue FoldedVOp = SimplifyVBinOp(N);
4128    if (FoldedVOp.getNode()) return FoldedVOp;
4129  }
4130
4131  // fold (fdiv c1, c2) -> c1/c2
4132  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4133    return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, N0, N1);
4134
4135
4136  // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
4137  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) {
4138    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) {
4139      // Both can be negated for free, check to see if at least one is cheaper
4140      // negated.
4141      if (LHSNeg == 2 || RHSNeg == 2)
4142        return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT,
4143                           GetNegatedExpression(N0, DAG, LegalOperations),
4144                           GetNegatedExpression(N1, DAG, LegalOperations));
4145    }
4146  }
4147
4148  return SDValue();
4149}
4150
4151SDValue DAGCombiner::visitFREM(SDNode *N) {
4152  SDValue N0 = N->getOperand(0);
4153  SDValue N1 = N->getOperand(1);
4154  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4155  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4156  EVT VT = N->getValueType(0);
4157
4158  // fold (frem c1, c2) -> fmod(c1,c2)
4159  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4160    return DAG.getNode(ISD::FREM, N->getDebugLoc(), VT, N0, N1);
4161
4162  return SDValue();
4163}
4164
4165SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
4166  SDValue N0 = N->getOperand(0);
4167  SDValue N1 = N->getOperand(1);
4168  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4169  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4170  EVT VT = N->getValueType(0);
4171
4172  if (N0CFP && N1CFP && VT != MVT::ppcf128)  // Constant fold
4173    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, N0, N1);
4174
4175  if (N1CFP) {
4176    const APFloat& V = N1CFP->getValueAPF();
4177    // copysign(x, c1) -> fabs(x)       iff ispos(c1)
4178    // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
4179    if (!V.isNegative()) {
4180      if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
4181        return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
4182    } else {
4183      if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
4184        return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT,
4185                           DAG.getNode(ISD::FABS, N0.getDebugLoc(), VT, N0));
4186    }
4187  }
4188
4189  // copysign(fabs(x), y) -> copysign(x, y)
4190  // copysign(fneg(x), y) -> copysign(x, y)
4191  // copysign(copysign(x,z), y) -> copysign(x, y)
4192  if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
4193      N0.getOpcode() == ISD::FCOPYSIGN)
4194    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
4195                       N0.getOperand(0), N1);
4196
4197  // copysign(x, abs(y)) -> abs(x)
4198  if (N1.getOpcode() == ISD::FABS)
4199    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
4200
4201  // copysign(x, copysign(y,z)) -> copysign(x, z)
4202  if (N1.getOpcode() == ISD::FCOPYSIGN)
4203    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
4204                       N0, N1.getOperand(1));
4205
4206  // copysign(x, fp_extend(y)) -> copysign(x, y)
4207  // copysign(x, fp_round(y)) -> copysign(x, y)
4208  if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND)
4209    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
4210                       N0, N1.getOperand(0));
4211
4212  return SDValue();
4213}
4214
4215SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
4216  SDValue N0 = N->getOperand(0);
4217  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
4218  EVT VT = N->getValueType(0);
4219  EVT OpVT = N0.getValueType();
4220
4221  // fold (sint_to_fp c1) -> c1fp
4222  if (N0C && OpVT != MVT::ppcf128)
4223    return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
4224
4225  // If the input is a legal type, and SINT_TO_FP is not legal on this target,
4226  // but UINT_TO_FP is legal on this target, try to convert.
4227  if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) &&
4228      TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) {
4229    // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
4230    if (DAG.SignBitIsZero(N0))
4231      return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
4232  }
4233
4234  return SDValue();
4235}
4236
4237SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
4238  SDValue N0 = N->getOperand(0);
4239  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
4240  EVT VT = N->getValueType(0);
4241  EVT OpVT = N0.getValueType();
4242
4243  // fold (uint_to_fp c1) -> c1fp
4244  if (N0C && OpVT != MVT::ppcf128)
4245    return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
4246
4247  // If the input is a legal type, and UINT_TO_FP is not legal on this target,
4248  // but SINT_TO_FP is legal on this target, try to convert.
4249  if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) &&
4250      TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) {
4251    // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
4252    if (DAG.SignBitIsZero(N0))
4253      return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
4254  }
4255
4256  return SDValue();
4257}
4258
4259SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
4260  SDValue N0 = N->getOperand(0);
4261  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4262  EVT VT = N->getValueType(0);
4263
4264  // fold (fp_to_sint c1fp) -> c1
4265  if (N0CFP)
4266    return DAG.getNode(ISD::FP_TO_SINT, N->getDebugLoc(), VT, N0);
4267
4268  return SDValue();
4269}
4270
4271SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
4272  SDValue N0 = N->getOperand(0);
4273  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4274  EVT VT = N->getValueType(0);
4275
4276  // fold (fp_to_uint c1fp) -> c1
4277  if (N0CFP && VT != MVT::ppcf128)
4278    return DAG.getNode(ISD::FP_TO_UINT, N->getDebugLoc(), VT, N0);
4279
4280  return SDValue();
4281}
4282
4283SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
4284  SDValue N0 = N->getOperand(0);
4285  SDValue N1 = N->getOperand(1);
4286  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4287  EVT VT = N->getValueType(0);
4288
4289  // fold (fp_round c1fp) -> c1fp
4290  if (N0CFP && N0.getValueType() != MVT::ppcf128)
4291    return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0, N1);
4292
4293  // fold (fp_round (fp_extend x)) -> x
4294  if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
4295    return N0.getOperand(0);
4296
4297  // fold (fp_round (fp_round x)) -> (fp_round x)
4298  if (N0.getOpcode() == ISD::FP_ROUND) {
4299    // This is a value-preserving truncation if both rounds are.
4300    bool IsTrunc = N->getConstantOperandVal(1) == 1 &&
4301                   N0.getNode()->getConstantOperandVal(1) == 1;
4302    return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0.getOperand(0),
4303                       DAG.getIntPtrConstant(IsTrunc));
4304  }
4305
4306  // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
4307  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
4308    SDValue Tmp = DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(), VT,
4309                              N0.getOperand(0), N1);
4310    AddToWorkList(Tmp.getNode());
4311    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
4312                       Tmp, N0.getOperand(1));
4313  }
4314
4315  return SDValue();
4316}
4317
4318SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
4319  SDValue N0 = N->getOperand(0);
4320  EVT VT = N->getValueType(0);
4321  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
4322  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4323
4324  // fold (fp_round_inreg c1fp) -> c1fp
4325  if (N0CFP && (TLI.isTypeLegal(EVT) || !LegalTypes)) {
4326    SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), EVT);
4327    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, Round);
4328  }
4329
4330  return SDValue();
4331}
4332
4333SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
4334  SDValue N0 = N->getOperand(0);
4335  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4336  EVT VT = N->getValueType(0);
4337
4338  // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
4339  if (N->hasOneUse() &&
4340      N->use_begin()->getOpcode() == ISD::FP_ROUND)
4341    return SDValue();
4342
4343  // fold (fp_extend c1fp) -> c1fp
4344  if (N0CFP && VT != MVT::ppcf128)
4345    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, N0);
4346
4347  // Turn fp_extend(fp_round(X, 1)) -> X since the fp_round doesn't affect
4348  // the value of X.
4349  if (N0.getOpcode() == ISD::FP_ROUND
4350      && N0.getNode()->getConstantOperandVal(1) == 1) {
4351    SDValue In = N0.getOperand(0);
4352    if (In.getValueType() == VT) return In;
4353    if (VT.bitsLT(In.getValueType()))
4354      return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT,
4355                         In, N0.getOperand(1));
4356    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, In);
4357  }
4358
4359  // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
4360  if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() &&
4361      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
4362       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
4363    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4364    SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
4365                                     LN0->getChain(),
4366                                     LN0->getBasePtr(), LN0->getSrcValue(),
4367                                     LN0->getSrcValueOffset(),
4368                                     N0.getValueType(),
4369                                     LN0->isVolatile(), LN0->getAlignment());
4370    CombineTo(N, ExtLoad);
4371    CombineTo(N0.getNode(),
4372              DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(),
4373                          N0.getValueType(), ExtLoad, DAG.getIntPtrConstant(1)),
4374              ExtLoad.getValue(1));
4375    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4376  }
4377
4378  return SDValue();
4379}
4380
4381SDValue DAGCombiner::visitFNEG(SDNode *N) {
4382  SDValue N0 = N->getOperand(0);
4383  EVT VT = N->getValueType(0);
4384
4385  if (isNegatibleForFree(N0, LegalOperations))
4386    return GetNegatedExpression(N0, DAG, LegalOperations);
4387
4388  // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
4389  // constant pool values.
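  // For example (illustrative), (fneg (f32 (bitconvert (i32 X)))) becomes
  // (f32 (bitconvert (xor (i32 X), 0x80000000))), flipping only the sign bit.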
4390  if (N0.getOpcode() == ISD::BIT_CONVERT &&
4391      !VT.isVector() &&
4392      N0.getNode()->hasOneUse() &&
4393      N0.getOperand(0).getValueType().isInteger()) {
4394    SDValue Int = N0.getOperand(0);
4395    EVT IntVT = Int.getValueType();
4396    if (IntVT.isInteger() && !IntVT.isVector()) {
4397      Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int,
4398              DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
4399      AddToWorkList(Int.getNode());
4400      return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
4401                         VT, Int);
4402    }
4403  }
4404
4405  return SDValue();
4406}
4407
4408SDValue DAGCombiner::visitFABS(SDNode *N) {
4409  SDValue N0 = N->getOperand(0);
4410  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4411  EVT VT = N->getValueType(0);
4412
4413  // fold (fabs c1) -> fabs(c1)
4414  if (N0CFP && VT != MVT::ppcf128)
4415    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
4416  // fold (fabs (fabs x)) -> (fabs x)
4417  if (N0.getOpcode() == ISD::FABS)
4418    return N->getOperand(0);
4419  // fold (fabs (fneg x)) -> (fabs x)
4420  // fold (fabs (fcopysign x, y)) -> (fabs x)
4421  if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
4422    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0.getOperand(0));
4423
4424  // Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
4425  // constant pool values.
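  // For example (illustrative), (fabs (f32 (bitconvert (i32 X)))) becomes
  // (f32 (bitconvert (and (i32 X), 0x7FFFFFFF))), clearing only the sign bit.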
4426  if (N0.getOpcode() == ISD::BIT_CONVERT && N0.getNode()->hasOneUse() &&
4427      N0.getOperand(0).getValueType().isInteger() &&
4428      !N0.getOperand(0).getValueType().isVector()) {
4429    SDValue Int = N0.getOperand(0);
4430    EVT IntVT = Int.getValueType();
4431    if (IntVT.isInteger() && !IntVT.isVector()) {
4432      Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int,
4433             DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
4434      AddToWorkList(Int.getNode());
4435      return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
4436                         N->getValueType(0), Int);
4437    }
4438  }
4439
4440  return SDValue();
4441}
4442
4443SDValue DAGCombiner::visitBRCOND(SDNode *N) {
4444  SDValue Chain = N->getOperand(0);
4445  SDValue N1 = N->getOperand(1);
4446  SDValue N2 = N->getOperand(2);
4447  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4448
4449  // never taken branch, fold to chain
4450  if (N1C && N1C->isNullValue())
4451    return Chain;
4452  // unconditional branch
4453  if (N1C && N1C->getAPIntValue() == 1)
4454    return DAG.getNode(ISD::BR, N->getDebugLoc(), MVT::Other, Chain, N2);
4455  // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
4456  // on the target.
4457  if (N1.getOpcode() == ISD::SETCC &&
4458      TLI.isOperationLegalOrCustom(ISD::BR_CC, MVT::Other)) {
4459    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
4460                       Chain, N1.getOperand(2),
4461                       N1.getOperand(0), N1.getOperand(1), N2);
4462  }
4463
4464  if (N1.hasOneUse() && N1.getOpcode() == ISD::SRL) {
4465    // Match this pattern so that we can generate simpler code:
4466    //
4467    //   %a = ...
4468    //   %b = and i32 %a, 2
4469    //   %c = srl i32 %b, 1
4470    //   brcond i32 %c ...
4471    //
4472    // into
4473    //
4474    //   %a = ...
4475    //   %b = and %a, 2
4476    //   %c = setcc eq %b, 0
4477    //   brcond %c ...
4478    //
4479    // This applies only when the AND constant value has one bit set and the
4480    // SRL constant is equal to the log2 of the AND constant. The back-end is
4481    // smart enough to convert the result into a TEST/JMP sequence.
4482    SDValue Op0 = N1.getOperand(0);
4483    SDValue Op1 = N1.getOperand(1);
4484
4485    if (Op0.getOpcode() == ISD::AND &&
4486        Op0.hasOneUse() &&
4487        Op1.getOpcode() == ISD::Constant) {
4488      SDValue AndOp1 = Op0.getOperand(1);
4489
4490      if (AndOp1.getOpcode() == ISD::Constant) {
4491        const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();
4492
4493        if (AndConst.isPowerOf2() &&
4494            cast<ConstantSDNode>(Op1)->getAPIntValue()==AndConst.logBase2()) {
4495          SDValue SetCC =
4496            DAG.getSetCC(N->getDebugLoc(),
4497                         TLI.getSetCCResultType(Op0.getValueType()),
4498                         Op0, DAG.getConstant(0, Op0.getValueType()),
4499                         ISD::SETNE);
4500
4501          // Replace the uses of SRL with SETCC
4502          DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
4503          removeFromWorkList(N1.getNode());
4504          DAG.DeleteNode(N1.getNode());
4505          return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
4506                             MVT::Other, Chain, SetCC, N2);
4507        }
4508      }
4509    }
4510  }
4511
4512  return SDValue();
4513}
4514
4515// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
4516//
4517SDValue DAGCombiner::visitBR_CC(SDNode *N) {
4518  CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
4519  SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
4520
4521  // Use SimplifySetCC to simplify SETCC's.
4522  SDValue Simp = SimplifySetCC(TLI.getSetCCResultType(CondLHS.getValueType()),
4523                               CondLHS, CondRHS, CC->get(), N->getDebugLoc(),
4524                               false);
4525  if (Simp.getNode()) AddToWorkList(Simp.getNode());
4526
4527  ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(Simp.getNode());
4528
4529  // fold br_cc true, dest -> br dest (unconditional branch)
4530  if (SCCC && !SCCC->isNullValue())
4531    return DAG.getNode(ISD::BR, N->getDebugLoc(), MVT::Other,
4532                       N->getOperand(0), N->getOperand(4));
4533  // fold br_cc false, dest -> unconditional fall through
4534  if (SCCC && SCCC->isNullValue())
4535    return N->getOperand(0);
4536
4537  // fold to a simpler setcc
4538  if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
4539    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
4540                       N->getOperand(0), Simp.getOperand(2),
4541                       Simp.getOperand(0), Simp.getOperand(1),
4542                       N->getOperand(4));
4543
4544  return SDValue();
4545}
4546
4547/// CombineToPreIndexedLoadStore - Try turning a load / store into a
4548/// pre-indexed load / store when the base pointer is an add or subtract
4549/// and it has other uses besides the load / store. After the
4550/// transformation, the new indexed load / store has effectively folded
4551/// the add / subtract in and all of its other uses are redirected to the
4552/// new load / store.
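///
/// For example (illustrative), given a target with pre-increment addressing:
///   NewPtr = add BasePtr, 4
///   Val    = load NewPtr
/// where NewPtr also has other users, the load becomes a pre-indexed load
/// that produces both Val and the incremented pointer, and the other users
/// of NewPtr are rewired to that pointer result.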
4553bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
4554  if (!LegalOperations)
4555    return false;
4556
4557  bool isLoad = true;
4558  SDValue Ptr;
4559  EVT VT;
4560  if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
4561    if (LD->isIndexed())
4562      return false;
4563    VT = LD->getMemoryVT();
4564    if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
4565        !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
4566      return false;
4567    Ptr = LD->getBasePtr();
4568  } else if (StoreSDNode *ST  = dyn_cast<StoreSDNode>(N)) {
4569    if (ST->isIndexed())
4570      return false;
4571    VT = ST->getMemoryVT();
4572    if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
4573        !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
4574      return false;
4575    Ptr = ST->getBasePtr();
4576    isLoad = false;
4577  } else {
4578    return false;
4579  }
4580
4581  // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
4582  // out.  There is no reason to make this a preinc/predec.
4583  if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
4584      Ptr.getNode()->hasOneUse())
4585    return false;
4586
4587  // Ask the target to do addressing mode selection.
4588  SDValue BasePtr;
4589  SDValue Offset;
4590  ISD::MemIndexedMode AM = ISD::UNINDEXED;
4591  if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
4592    return false;
4593  // Don't create an indexed load / store with zero offset.
4594  if (isa<ConstantSDNode>(Offset) &&
4595      cast<ConstantSDNode>(Offset)->isNullValue())
4596    return false;
4597
4598  // Try turning it into a pre-indexed load / store except when:
4599  // 1) The new base ptr is a frame index.
4600  // 2) If N is a store and the new base ptr is either the same as or is a
4601  //    predecessor of the value being stored.
4602  // 3) Another use of old base ptr is a predecessor of N. If ptr is folded
4603  //    that would create a cycle.
4604  // 4) All uses are load / store ops that use it as old base ptr.
4605
4606  // Check #1.  Preinc'ing a frame index would require copying the stack pointer
4607  // (plus the implicit offset) to a register to preinc anyway.
4608  if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
4609    return false;
4610
4611  // Check #2.
4612  if (!isLoad) {
4613    SDValue Val = cast<StoreSDNode>(N)->getValue();
4614    if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode()))
4615      return false;
4616  }
4617
4618  // Now check for #3 and #4.
4619  bool RealUse = false;
4620  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
4621         E = Ptr.getNode()->use_end(); I != E; ++I) {
4622    SDNode *Use = *I;
4623    if (Use == N)
4624      continue;
4625    if (Use->isPredecessorOf(N))
4626      return false;
4627
4628    if (!((Use->getOpcode() == ISD::LOAD &&
4629           cast<LoadSDNode>(Use)->getBasePtr() == Ptr) ||
4630          (Use->getOpcode() == ISD::STORE &&
4631           cast<StoreSDNode>(Use)->getBasePtr() == Ptr)))
4632      RealUse = true;
4633  }
4634
4635  if (!RealUse)
4636    return false;
4637
4638  SDValue Result;
4639  if (isLoad)
4640    Result = DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
4641                                BasePtr, Offset, AM);
4642  else
4643    Result = DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
4644                                 BasePtr, Offset, AM);
4645  ++PreIndexedNodes;
4646  ++NodesCombined;
4647  DEBUG(errs() << "\nReplacing.4 ";
4648        N->dump(&DAG);
4649        errs() << "\nWith: ";
4650        Result.getNode()->dump(&DAG);
4651        errs() << '\n');
4652  WorkListRemover DeadNodes(*this);
4653  if (isLoad) {
4654    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
4655                                  &DeadNodes);
4656    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2),
4657                                  &DeadNodes);
4658  } else {
4659    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1),
4660                                  &DeadNodes);
4661  }
4662
4663  // Finally, since the node is now dead, remove it from the graph.
4664  DAG.DeleteNode(N);
4665
4666  // Replace the uses of Ptr with uses of the updated base value.
4667  DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0),
4668                                &DeadNodes);
4669  removeFromWorkList(Ptr.getNode());
4670  DAG.DeleteNode(Ptr.getNode());
4671
4672  return true;
4673}
4674
4675/// CombineToPostIndexedLoadStore - Try to combine a load / store with a
4676/// add / sub of the base pointer node into a post-indexed load / store.
4677/// After the transformation, the new indexed load / store has effectively
4678/// folded the add / subtract in, and all other uses of the add / subtract
4679/// are redirected to the new load / store.
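///
/// For example (illustrative), given a target with post-increment addressing:
///   Val    = load Ptr
///   NewPtr = add Ptr, 4
/// the two nodes become a single post-indexed load that yields both Val and
/// the incremented pointer NewPtr.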
4680bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
4681  if (!LegalOperations)
4682    return false;
4683
4684  bool isLoad = true;
4685  SDValue Ptr;
4686  EVT VT;
4687  if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
4688    if (LD->isIndexed())
4689      return false;
4690    VT = LD->getMemoryVT();
4691    if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
4692        !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
4693      return false;
4694    Ptr = LD->getBasePtr();
4695  } else if (StoreSDNode *ST  = dyn_cast<StoreSDNode>(N)) {
4696    if (ST->isIndexed())
4697      return false;
4698    VT = ST->getMemoryVT();
4699    if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
4700        !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
4701      return false;
4702    Ptr = ST->getBasePtr();
4703    isLoad = false;
4704  } else {
4705    return false;
4706  }
4707
4708  if (Ptr.getNode()->hasOneUse())
4709    return false;
4710
4711  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
4712         E = Ptr.getNode()->use_end(); I != E; ++I) {
4713    SDNode *Op = *I;
4714    if (Op == N ||
4715        (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
4716      continue;
4717
4718    SDValue BasePtr;
4719    SDValue Offset;
4720    ISD::MemIndexedMode AM = ISD::UNINDEXED;
4721    if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
4722      if (Ptr == Offset && Op->getOpcode() == ISD::ADD)
4723        std::swap(BasePtr, Offset);
4724      if (Ptr != BasePtr)
4725        continue;
4726      // Don't create an indexed load / store with zero offset.
4727      if (isa<ConstantSDNode>(Offset) &&
4728          cast<ConstantSDNode>(Offset)->isNullValue())
4729        continue;
4730
4731      // Try turning it into a post-indexed load / store except when
4732      // 1) All uses are load / store ops that use it as base ptr.
4733      // 2) Op must be independent of N, i.e. Op is neither a predecessor
4734      //    nor a successor of N. Otherwise, if Op is folded that would
4735      //    create a cycle.
4736
4737      if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
4738        continue;
4739
4740      // Check for #1.
4741      bool TryNext = false;
4742      for (SDNode::use_iterator II = BasePtr.getNode()->use_begin(),
4743             EE = BasePtr.getNode()->use_end(); II != EE; ++II) {
4744        SDNode *Use = *II;
4745        if (Use == Ptr.getNode())
4746          continue;
4747
4748        // If all the uses are load / store addresses, then don't do the
4749        // transformation.
4750        if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
4751          bool RealUse = false;
4752          for (SDNode::use_iterator III = Use->use_begin(),
4753                 EEE = Use->use_end(); III != EEE; ++III) {
4754            SDNode *UseUse = *III;
4755            if (!((UseUse->getOpcode() == ISD::LOAD &&
4756                   cast<LoadSDNode>(UseUse)->getBasePtr().getNode() == Use) ||
4757                  (UseUse->getOpcode() == ISD::STORE &&
4758                   cast<StoreSDNode>(UseUse)->getBasePtr().getNode() == Use)))
4759              RealUse = true;
4760          }
4761
4762          if (!RealUse) {
4763            TryNext = true;
4764            break;
4765          }
4766        }
4767      }
4768
4769      if (TryNext)
4770        continue;
4771
4772      // Check for #2
4773      if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
4774        SDValue Result = isLoad
4775          ? DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
4776                               BasePtr, Offset, AM)
4777          : DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
4778                                BasePtr, Offset, AM);
4779        ++PostIndexedNodes;
4780        ++NodesCombined;
4781        DEBUG(errs() << "\nReplacing.5 ";
4782              N->dump(&DAG);
4783              errs() << "\nWith: ";
4784              Result.getNode()->dump(&DAG);
4785              errs() << '\n');
4786        WorkListRemover DeadNodes(*this);
4787        if (isLoad) {
4788          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
4789                                        &DeadNodes);
4790          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2),
4791                                        &DeadNodes);
4792        } else {
4793          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1),
4794                                        &DeadNodes);
4795        }
4796
4797        // Finally, since the node is now dead, remove it from the graph.
4798        DAG.DeleteNode(N);
4799
4800        // Replace the uses of Op with uses of the updated base value.
4801        DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
4802                                      Result.getValue(isLoad ? 1 : 0),
4803                                      &DeadNodes);
4804        removeFromWorkList(Op);
4805        DAG.DeleteNode(Op);
4806        return true;
4807      }
4808    }
4809  }
4810
4811  return false;
4812}
4813
4814/// InferAlignment - If we can infer some alignment information from this
4815/// pointer, return it.
4816static unsigned InferAlignment(SDValue Ptr, SelectionDAG &DAG) {
4817  // If this is a direct reference to a stack slot, use information about the
4818  // stack slot's alignment.
4819  int FrameIdx = 1 << 31;
4820  int64_t FrameOffset = 0;
4821  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
4822    FrameIdx = FI->getIndex();
4823  } else if (Ptr.getOpcode() == ISD::ADD &&
4824             isa<ConstantSDNode>(Ptr.getOperand(1)) &&
4825             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
4826    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4827    FrameOffset = Ptr.getConstantOperandVal(1);
4828  }
4829
4830  if (FrameIdx != (1 << 31)) {
4831    // FIXME: Handle FI+CST.
4832    const MachineFrameInfo &MFI = *DAG.getMachineFunction().getFrameInfo();
4833    if (MFI.isFixedObjectIndex(FrameIdx)) {
4834      int64_t ObjectOffset = MFI.getObjectOffset(FrameIdx) + FrameOffset;
4835
4836      // The alignment of the frame index can be determined from its offset from
4837      // the incoming frame position.  If the frame object is at offset 32 and
4838      // the stack is guaranteed to be 16-byte aligned, then we know that the
4839      // object is 16-byte aligned.
4840      unsigned StackAlign = DAG.getTarget().getFrameInfo()->getStackAlignment();
4841      unsigned Align = MinAlign(ObjectOffset, StackAlign);
4842
4843      // Finally, the frame object itself may have a known alignment.  Factor
4844      // the alignment + offset into a new alignment.  For example, if we know
4845      // the FI is 8-byte aligned, but the pointer is 4 off, we really have a
4846      // 4-byte alignment of the resultant pointer.  Likewise align 4 + 4-byte
4847      // offset = 4-byte alignment, align 4 + 1-byte offset = align 1, etc.
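      // For example (illustrative), MinAlign(8, 4) == 4 and
      // MinAlign(8, 6) == 2: the result is the largest power of two that
      // divides both the known alignment and the offset.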
4848      unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
4849                                      FrameOffset);
4850      return std::max(Align, FIInfoAlign);
4851    }
4852  }
4853
4854  return 0;
4855}
4856
4857SDValue DAGCombiner::visitLOAD(SDNode *N) {
4858  LoadSDNode *LD  = cast<LoadSDNode>(N);
4859  SDValue Chain = LD->getChain();
4860  SDValue Ptr   = LD->getBasePtr();
4861
4862  // Try to infer better alignment information than the load already has.
4863  if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
4864    if (unsigned Align = InferAlignment(Ptr, DAG)) {
4865      if (Align > LD->getAlignment())
4866        return DAG.getExtLoad(LD->getExtensionType(), N->getDebugLoc(),
4867                              LD->getValueType(0),
4868                              Chain, Ptr, LD->getSrcValue(),
4869                              LD->getSrcValueOffset(), LD->getMemoryVT(),
4870                              LD->isVolatile(), Align);
4871    }
4872  }
4873
4874  // If load is not volatile and there are no uses of the loaded value (and
4875  // the updated indexed value in case of indexed loads), change uses of the
4876  // chain value into uses of the chain input (i.e. delete the dead load).
4877  if (!LD->isVolatile()) {
4878    if (N->getValueType(1) == MVT::Other) {
4879      // Unindexed loads.
4880      if (N->hasNUsesOfValue(0, 0)) {
4881        // It's not safe to use the two value CombineTo variant here. e.g.
4882        // v1, chain2 = load chain1, loc
4883        // v2, chain3 = load chain2, loc
4884        // v3         = add v2, c
4885        // Now we replace use of chain2 with chain1.  This makes the second load
4886        // isomorphic to the one we are deleting, and thus makes this load live.
4887        DEBUG(errs() << "\nReplacing.6 ";
4888              N->dump(&DAG);
4889              errs() << "\nWith chain: ";
4890              Chain.getNode()->dump(&DAG);
4891              errs() << "\n");
4892        WorkListRemover DeadNodes(*this);
4893        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain, &DeadNodes);
4894
4895        if (N->use_empty()) {
4896          removeFromWorkList(N);
4897          DAG.DeleteNode(N);
4898        }
4899
4900        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4901      }
4902    } else {
4903      // Indexed loads.
4904      assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
4905      if (N->hasNUsesOfValue(0, 0) && N->hasNUsesOfValue(0, 1)) {
4906        SDValue Undef = DAG.getUNDEF(N->getValueType(0));
4907        DEBUG(errs() << "\nReplacing.6 ";
4908              N->dump(&DAG);
4909              errs() << "\nWith: ";
4910              Undef.getNode()->dump(&DAG);
4911              errs() << " and 2 other values\n");
4912        WorkListRemover DeadNodes(*this);
4913        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef, &DeadNodes);
4914        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1),
4915                                      DAG.getUNDEF(N->getValueType(1)),
4916                                      &DeadNodes);
4917        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain, &DeadNodes);
4918        removeFromWorkList(N);
4919        DAG.DeleteNode(N);
4920        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4921      }
4922    }
4923  }
4924
4925  // If this load is directly stored, replace the load value with the stored
4926  // value.
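  // For example (illustrative), a non-extending load whose chain is a
  // non-truncating store to the same pointer of a value of the same type
  // simply yields the stored value:
  //   (load P, chain=(store X, P)) -> X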
4927  // TODO: Handle store large -> read small portion.
4928  // TODO: Handle TRUNCSTORE/LOADEXT
4929  if (LD->getExtensionType() == ISD::NON_EXTLOAD &&
4930      !LD->isVolatile()) {
4931    if (ISD::isNON_TRUNCStore(Chain.getNode())) {
4932      StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
4933      if (PrevST->getBasePtr() == Ptr &&
4934          PrevST->getValue().getValueType() == N->getValueType(0))
4935        return CombineTo(N, Chain.getOperand(1), Chain);
4936    }
4937  }
4938
4939  if (CombinerAA) {
4940    // Walk up chain skipping non-aliasing memory nodes.
4941    SDValue BetterChain = FindBetterChain(N, Chain);
4942
4943    // If there is a better chain.
4944    if (Chain != BetterChain) {
4945      SDValue ReplLoad;
4946
4947      // Replace the chain to avoid dependency.
4948      if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
4949        ReplLoad = DAG.getLoad(N->getValueType(0), LD->getDebugLoc(),
4950                               BetterChain, Ptr,
4951                               LD->getSrcValue(), LD->getSrcValueOffset(),
4952                               LD->isVolatile(), LD->getAlignment());
4953      } else {
4954        ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(),
4955                                  LD->getValueType(0),
4956                                  BetterChain, Ptr, LD->getSrcValue(),
4957                                  LD->getSrcValueOffset(),
4958                                  LD->getMemoryVT(),
4959                                  LD->isVolatile(),
4960                                  LD->getAlignment());
4961      }
4962
4963      // Create token factor to keep old chain connected.
4964      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
4965                                  MVT::Other, Chain, ReplLoad.getValue(1));
4966
4967      // Make sure the new and old chains are cleaned up.
4968      AddToWorkList(Token.getNode());
4969
4970      // Replace uses with load result and token factor. Don't add users
4971      // to work list.
4972      return CombineTo(N, ReplLoad.getValue(0), Token, false);
4973    }
4974  }
4975
4976  // Try transforming N to an indexed load.
4977  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
4978    return SDValue(N, 0);
4979
4980  return SDValue();
4981}
4982
4983
4984/// ReduceLoadOpStoreWidth - Look for sequence of load / op / store where op is
4985/// one of 'or', 'xor', and 'and' of immediates. If 'op' is only touching some
4986/// of the loaded bits, try narrowing the load and store if it would end up
4987/// being a win for performance or code size.
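///
/// For example (illustrative), on a little-endian target:
///   X = load i32 [P]
///   Y = or X, 0x00AB0000
///   store i32 Y, [P]
/// only changes byte 2 of the value, so it can be narrowed to an i8
/// load / or / store at address P+2 when the target finds that profitable.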
4988SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
4989  StoreSDNode *ST  = cast<StoreSDNode>(N);
4990  if (ST->isVolatile())
4991    return SDValue();
4992
4993  SDValue Chain = ST->getChain();
4994  SDValue Value = ST->getValue();
4995  SDValue Ptr   = ST->getBasePtr();
4996  EVT VT = Value.getValueType();
4997
4998  if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
4999    return SDValue();
5000
5001  unsigned Opc = Value.getOpcode();
5002  if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
5003      Value.getOperand(1).getOpcode() != ISD::Constant)
5004    return SDValue();
5005
5006  SDValue N0 = Value.getOperand(0);
5007  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) {
5008    LoadSDNode *LD = cast<LoadSDNode>(N0);
5009    if (LD->getBasePtr() != Ptr)
5010      return SDValue();
5011
5012    // Find the type to narrow the load / op / store to.
5013    SDValue N1 = Value.getOperand(1);
5014    unsigned BitWidth = N1.getValueSizeInBits();
5015    APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
5016    if (Opc == ISD::AND)
5017      Imm ^= APInt::getAllOnesValue(BitWidth);
5018    if (Imm == 0 || Imm.isAllOnesValue())
5019      return SDValue();
5020    unsigned ShAmt = Imm.countTrailingZeros();
5021    unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
5022    unsigned NewBW = NextPowerOf2(MSB - ShAmt);
5023    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
5024    while (NewBW < BitWidth &&
5025           !(TLI.isOperationLegalOrCustom(Opc, NewVT) &&
5026             TLI.isNarrowingProfitable(VT, NewVT))) {
5027      NewBW = NextPowerOf2(NewBW);
5028      NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
5029    }
5030    if (NewBW >= BitWidth)
5031      return SDValue();
5032
5033    // If the lowest changed bit does not start at a NewBW-bit boundary,
5034    // start at the previous one.
5035    if (ShAmt % NewBW)
5036      ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
5037    APInt Mask = APInt::getBitsSet(BitWidth, ShAmt, ShAmt + NewBW);
5038    if ((Imm & Mask) == Imm) {
5039      APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
5040      if (Opc == ISD::AND)
5041        NewImm ^= APInt::getAllOnesValue(NewBW);
5042      uint64_t PtrOff = ShAmt / 8;
5043      // For big-endian targets, we need to adjust the pointer offset so that
5044      // we load the correct bytes.
5045      if (TLI.isBigEndian())
5046        PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
5047
5048      unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
5049      if (NewAlign < TLI.getTargetData()->getABITypeAlignment(
5050                         NewVT.getTypeForEVT(*DAG.getContext())))
5051        return SDValue();
5052
5053      SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
5054                                   Ptr.getValueType(), Ptr,
5055                                   DAG.getConstant(PtrOff, Ptr.getValueType()));
5056      SDValue NewLD = DAG.getLoad(NewVT, N0.getDebugLoc(),
5057                                  LD->getChain(), NewPtr,
5058                                  LD->getSrcValue(), LD->getSrcValueOffset(),
5059                                  LD->isVolatile(), NewAlign);
5060      SDValue NewVal = DAG.getNode(Opc, Value.getDebugLoc(), NewVT, NewLD,
5061                                   DAG.getConstant(NewImm, NewVT));
5062      SDValue NewST = DAG.getStore(Chain, N->getDebugLoc(),
5063                                   NewVal, NewPtr,
5064                                   ST->getSrcValue(), ST->getSrcValueOffset(),
5065                                   false, NewAlign);
5066
5067      AddToWorkList(NewPtr.getNode());
5068      AddToWorkList(NewLD.getNode());
5069      AddToWorkList(NewVal.getNode());
5070      WorkListRemover DeadNodes(*this);
5071      DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1),
5072                                    &DeadNodes);
5073      ++OpsNarrowed;
5074      return NewST;
5075    }
5076  }
5077
5078  return SDValue();
5079}
5080
5081SDValue DAGCombiner::visitSTORE(SDNode *N) {
5082  StoreSDNode *ST  = cast<StoreSDNode>(N);
5083  SDValue Chain = ST->getChain();
5084  SDValue Value = ST->getValue();
5085  SDValue Ptr   = ST->getBasePtr();
5086
5087  // Try to infer better alignment information than the store already has.
5088  if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
5089    if (unsigned Align = InferAlignment(Ptr, DAG)) {
5090      if (Align > ST->getAlignment())
5091        return DAG.getTruncStore(Chain, N->getDebugLoc(), Value,
5092                                 Ptr, ST->getSrcValue(),
5093                                 ST->getSrcValueOffset(), ST->getMemoryVT(),
5094                                 ST->isVolatile(), Align);
5095    }
5096  }
5097
5098  // If this is a store of a bit convert, store the input value if the
5099  // resultant store does not need a higher alignment than the original.
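  // For example (illustrative), (store (f32 (bitconvert (i32 X))), P) can be
  // replaced by (store (i32 X), P) as long as i32 does not require a stricter
  // alignment than the original store provides.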
5100  if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
5101      ST->isUnindexed()) {
5102    unsigned OrigAlign = ST->getAlignment();
5103    EVT SVT = Value.getOperand(0).getValueType();
5104    unsigned Align = TLI.getTargetData()->
5105      getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
5106    if (Align <= OrigAlign &&
5107        ((!LegalOperations && !ST->isVolatile()) ||
5108         TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
5109      return DAG.getStore(Chain, N->getDebugLoc(), Value.getOperand(0),
5110                          Ptr, ST->getSrcValue(),
5111                          ST->getSrcValueOffset(), ST->isVolatile(), OrigAlign);
5112  }
5113
5114  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
5115  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
5116    // NOTE: If the original store is volatile, this transform must not increase
5117    // the number of stores.  For example, on x86-32 an f64 can be stored in one
5118    // processor operation but an i64 (which is not legal) requires two.  So the
5119    // transform should not be done in this case.
5120    if (Value.getOpcode() != ISD::TargetConstantFP) {
5121      SDValue Tmp;
5122      switch (CFP->getValueType(0).getSimpleVT().SimpleTy) {
5123      default: llvm_unreachable("Unknown FP type");
5124      case MVT::f80:    // We don't do this for these yet.
5125      case MVT::f128:
5126      case MVT::ppcf128:
5127        break;
5128      case MVT::f32:
5129        if (((TLI.isTypeLegal(MVT::i32) || !LegalTypes) && !LegalOperations &&
5130             !ST->isVolatile()) ||
5131            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
5132          Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
5133                              bitcastToAPInt().getZExtValue(), MVT::i32);
5134          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
5135                              Ptr, ST->getSrcValue(),
5136                              ST->getSrcValueOffset(), ST->isVolatile(),
5137                              ST->getAlignment());
5138        }
5139        break;
5140      case MVT::f64:
5141        if (((TLI.isTypeLegal(MVT::i64) || !LegalTypes) && !LegalOperations &&
5142             !ST->isVolatile()) ||
5143            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
5144          Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
5145                                getZExtValue(), MVT::i64);
5146          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
5147                              Ptr, ST->getSrcValue(),
5148                              ST->getSrcValueOffset(), ST->isVolatile(),
5149                              ST->getAlignment());
5150        } else if (!ST->isVolatile() &&
5151                   TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
5152          // Many FP stores are not made apparent until after legalize, e.g. for
5153          // argument passing.  Since this is so common, custom legalize the
5154          // 64-bit integer store into two 32-bit stores.
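          // For example (illustrative), 'store double 1.0, P' (bit pattern
          // 0x3FF0000000000000) becomes 'store i32 0, P' followed by
          // 'store i32 0x3FF00000, P+4' on a little-endian target.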
5155          uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
5156          SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32);
5157          SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
5158          if (TLI.isBigEndian()) std::swap(Lo, Hi);
5159
5160          int SVOffset = ST->getSrcValueOffset();
5161          unsigned Alignment = ST->getAlignment();
5162          bool isVolatile = ST->isVolatile();
5163
5164          SDValue St0 = DAG.getStore(Chain, ST->getDebugLoc(), Lo,
5165                                     Ptr, ST->getSrcValue(),
5166                                     ST->getSrcValueOffset(),
5167                                     isVolatile, ST->getAlignment());
5168          Ptr = DAG.getNode(ISD::ADD, N->getDebugLoc(), Ptr.getValueType(), Ptr,
5169                            DAG.getConstant(4, Ptr.getValueType()));
5170          SVOffset += 4;
5171          Alignment = MinAlign(Alignment, 4U);
5172          SDValue St1 = DAG.getStore(Chain, ST->getDebugLoc(), Hi,
5173                                     Ptr, ST->getSrcValue(),
5174                                     SVOffset, isVolatile, Alignment);
5175          return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
5176                             St0, St1);
5177        }
5178
5179        break;
5180      }
5181    }
5182  }
5183
5184  if (CombinerAA) {
5185    // Walk up chain skipping non-aliasing memory nodes.
5186    SDValue BetterChain = FindBetterChain(N, Chain);
5187
5188    // If there is a better chain.
5189    if (Chain != BetterChain) {
5190      SDValue ReplStore;
5191
5192      // Replace the chain to avoid dependency.
5193      if (ST->isTruncatingStore()) {
5194        ReplStore = DAG.getTruncStore(BetterChain, N->getDebugLoc(), Value, Ptr,
5195                                      ST->getSrcValue(),ST->getSrcValueOffset(),
5196                                      ST->getMemoryVT(),
5197                                      ST->isVolatile(), ST->getAlignment());
5198      } else {
5199        ReplStore = DAG.getStore(BetterChain, N->getDebugLoc(), Value, Ptr,
5200                                 ST->getSrcValue(), ST->getSrcValueOffset(),
5201                                 ST->isVolatile(), ST->getAlignment());
5202      }
5203
5204      // Create token to keep both nodes around.
5205      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
5206                                  MVT::Other, Chain, ReplStore);
5207
5208      // Make sure the new and old chains are cleaned up.
5209      AddToWorkList(Token.getNode());
5210
5211      // Don't add users to work list.
5212      return CombineTo(N, Token, false);
5213    }
5214  }
5215
5216  // Try transforming N to an indexed store.
5217  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
5218    return SDValue(N, 0);
5219
5220  // FIXME: is there such a thing as a truncating indexed store?
5221  if (ST->isTruncatingStore() && ST->isUnindexed() &&
5222      Value.getValueType().isInteger()) {
5223    // See if we can simplify the input to this truncstore with knowledge that
5224    // only the low bits are being used.  For example:
5225    // "truncstore (or (shl x, 8), y), i8"  -> "truncstore y, i8"
5226    SDValue Shorter =
5227      GetDemandedBits(Value,
5228                      APInt::getLowBitsSet(Value.getValueSizeInBits(),
5229                                           ST->getMemoryVT().getSizeInBits()));
5230    AddToWorkList(Value.getNode());
5231    if (Shorter.getNode())
5232      return DAG.getTruncStore(Chain, N->getDebugLoc(), Shorter,
5233                               Ptr, ST->getSrcValue(),
5234                               ST->getSrcValueOffset(), ST->getMemoryVT(),
5235                               ST->isVolatile(), ST->getAlignment());
5236
5237    // Otherwise, see if we can simplify the operation with
5238    // SimplifyDemandedBits, which only works if the value has a single use.
5239    if (SimplifyDemandedBits(Value,
5240                             APInt::getLowBitsSet(
5241                               Value.getValueSizeInBits(),
5242                               ST->getMemoryVT().getSizeInBits())))
5243      return SDValue(N, 0);
5244  }
5245
5246  // If this is a load followed by a store to the same location, then the store
5247  // is dead/noop.
5248  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
5249    if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
5250        ST->isUnindexed() && !ST->isVolatile() &&
5251        // There can't be any side effects between the load and store, such as
5252        // a call or store.
5253        Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
5254      // The store is dead, remove it.
5255      return Chain;
5256    }
5257  }
5258
5259  // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
5260  // truncating store.  We can do this even if this is already a truncstore.
5261  if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
5262      && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
5263      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
5264                            ST->getMemoryVT())) {
5265    return DAG.getTruncStore(Chain, N->getDebugLoc(), Value.getOperand(0),
5266                             Ptr, ST->getSrcValue(),
5267                             ST->getSrcValueOffset(), ST->getMemoryVT(),
5268                             ST->isVolatile(), ST->getAlignment());
5269  }
5270
5271  return ReduceLoadOpStoreWidth(N);
5272}
5273
5274SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
5275  SDValue InVec = N->getOperand(0);
5276  SDValue InVal = N->getOperand(1);
5277  SDValue EltNo = N->getOperand(2);
5278
5279  // If the invec is a BUILD_VECTOR and if EltNo is a constant, build a new
5280  // vector with the inserted element.
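  // For example, (insert_vector_elt (build_vector A, B, C, D), V, 2)
  // becomes (build_vector A, B, V, D).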
5281  if (InVec.getOpcode() == ISD::BUILD_VECTOR && isa<ConstantSDNode>(EltNo)) {
5282    unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
5283    SmallVector<SDValue, 8> Ops(InVec.getNode()->op_begin(),
5284                                InVec.getNode()->op_end());
5285    if (Elt < Ops.size())
5286      Ops[Elt] = InVal;
5287    return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
5288                       InVec.getValueType(), &Ops[0], Ops.size());
5289  }
5290  // If the invec is an UNDEF and if EltNo is a constant, create a new
5291  // BUILD_VECTOR with undef elements and the inserted element.
5292  if (!LegalOperations && InVec.getOpcode() == ISD::UNDEF &&
5293      isa<ConstantSDNode>(EltNo)) {
5294    EVT VT = InVec.getValueType();
5295    EVT EltVT = VT.getVectorElementType();
5296    unsigned NElts = VT.getVectorNumElements();
5297    SmallVector<SDValue, 8> Ops(NElts, DAG.getUNDEF(EltVT));
5298
5299    unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
5300    if (Elt < Ops.size())
5301      Ops[Elt] = InVal;
5302    return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
5303                       InVec.getValueType(), &Ops[0], Ops.size());
5304  }
5305  return SDValue();
5306}
5307
5308SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
5309  // (vextract (scalar_to_vector val), 0) -> val
5310  SDValue InVec = N->getOperand(0);
5311
5312  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
5313    // If the operand is wider than the vector element type then it is
5314    // implicitly truncated.  Make that explicit here.
5315    EVT EltVT = InVec.getValueType().getVectorElementType();
5316    SDValue InOp = InVec.getOperand(0);
5317    if (InOp.getValueType() != EltVT)
5318      return DAG.getNode(ISD::TRUNCATE, InVec.getDebugLoc(), EltVT, InOp);
5319    return InOp;
5320  }
5321
5322  // Perform only after legalization to ensure build_vector / vector_shuffle
5323  // optimizations have already been done.
5324  if (!LegalOperations) return SDValue();
5325
5326  // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
5327  // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
5328  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
5329  SDValue EltNo = N->getOperand(1);
5330
5331  if (isa<ConstantSDNode>(EltNo)) {
5332    unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
5333    bool NewLoad = false;
5334    bool BCNumEltsChanged = false;
5335    EVT VT = InVec.getValueType();
5336    EVT ExtVT = VT.getVectorElementType();
5337    EVT LVT = ExtVT;
5338
5339    if (InVec.getOpcode() == ISD::BIT_CONVERT) {
5340      EVT BCVT = InVec.getOperand(0).getValueType();
5341      if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
5342        return SDValue();
5343      if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
5344        BCNumEltsChanged = true;
5345      InVec = InVec.getOperand(0);
5346      ExtVT = BCVT.getVectorElementType();
5347      NewLoad = true;
5348    }
5349
5350    LoadSDNode *LN0 = NULL;
5351    const ShuffleVectorSDNode *SVN = NULL;
5352    if (ISD::isNormalLoad(InVec.getNode())) {
5353      LN0 = cast<LoadSDNode>(InVec);
5354    } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
5355               InVec.getOperand(0).getValueType() == ExtVT &&
5356               ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
5357      LN0 = cast<LoadSDNode>(InVec.getOperand(0));
5358    } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
5359      // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
5360      // =>
5361      // (load $addr+1*size)
5362
5363      // If the bit convert changed the number of elements, it is unsafe
5364      // to examine the mask.
5365      if (BCNumEltsChanged)
5366        return SDValue();
5367
5368      // Select the input vector, guarding against an out-of-range index.
5369      unsigned NumElems = VT.getVectorNumElements();
5370      int Idx = (Elt >= NumElems) ? -1 : SVN->getMaskElt(Elt);
5371      InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
5372
5373      if (InVec.getOpcode() == ISD::BIT_CONVERT)
5374        InVec = InVec.getOperand(0);
5375      if (ISD::isNormalLoad(InVec.getNode())) {
5376        LN0 = cast<LoadSDNode>(InVec);
5377        Elt = (Idx < (int)NumElems) ? Idx : Idx - NumElems;
5378      }
5379    }
5380
5381    if (!LN0 || !LN0->hasOneUse() || LN0->isVolatile())
5382      return SDValue();
5383
5384    unsigned Align = LN0->getAlignment();
5385    if (NewLoad) {
5386      // Check that the resultant load doesn't need a higher alignment than
5387      // the original load.
5388      const Type *Ty = LVT.getTypeForEVT(*DAG.getContext());
5389      unsigned NewAlign = TLI.getTargetData()->getABITypeAlignment(Ty);
5390
5391      if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
5392        return SDValue();
5393
5394      Align = NewAlign;
5395    }
5396
5397    SDValue NewPtr = LN0->getBasePtr();
5398    if (Elt) {
5399      unsigned PtrOff = LVT.getSizeInBits() * Elt / 8;
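      // For example, extracting element 2 of a v4f32 gives a byte offset of
      // 32 * 2 / 8 = 8 from the base pointer (before any endian adjustment).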
5400      EVT PtrType = NewPtr.getValueType();
5401      if (TLI.isBigEndian())
5402        PtrOff = VT.getSizeInBits() / 8 - PtrOff;
5403      NewPtr = DAG.getNode(ISD::ADD, N->getDebugLoc(), PtrType, NewPtr,
5404                           DAG.getConstant(PtrOff, PtrType));
5405    }
5406
5407    return DAG.getLoad(LVT, N->getDebugLoc(), LN0->getChain(), NewPtr,
5408                       LN0->getSrcValue(), LN0->getSrcValueOffset(),
5409                       LN0->isVolatile(), Align);
5410  }
5411
5412  return SDValue();
5413}
5414
5415SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
5416  unsigned NumInScalars = N->getNumOperands();
5417  EVT VT = N->getValueType(0);
5418
5419  // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
5420  // operations.  If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
5421  // at most two distinct vectors, turn this into a shuffle node.
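  // e.g. (build_vector (extract_vector_elt A, 0), (extract_vector_elt B, 1),
  //                     (extract_vector_elt A, 2), undef)
  // becomes (vector_shuffle A, B, <0, 5, 2, u>) for 4-element vectors.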
5422  SDValue VecIn1, VecIn2;
5423  for (unsigned i = 0; i != NumInScalars; ++i) {
5424    // Ignore undef inputs.
5425    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
5426
5427    // If this input is something other than an EXTRACT_VECTOR_ELT with a
5428    // constant index, bail out.
5429    if (N->getOperand(i).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5430        !isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) {
5431      VecIn1 = VecIn2 = SDValue(0, 0);
5432      break;
5433    }
5434
5435    // If the input vector type disagrees with the result of the build_vector,
5436    // we can't make a shuffle.
5437    SDValue ExtractedFromVec = N->getOperand(i).getOperand(0);
5438    if (ExtractedFromVec.getValueType() != VT) {
5439      VecIn1 = VecIn2 = SDValue(0, 0);
5440      break;
5441    }
5442
5443    // Otherwise, remember this.  We allow up to two distinct input vectors.
5444    if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
5445      continue;
5446
5447    if (VecIn1.getNode() == 0) {
5448      VecIn1 = ExtractedFromVec;
5449    } else if (VecIn2.getNode() == 0) {
5450      VecIn2 = ExtractedFromVec;
5451    } else {
5452      // Too many inputs.
5453      VecIn1 = VecIn2 = SDValue(0, 0);
5454      break;
5455    }
5456  }
5457
5458  // If everything is good, we can make a shuffle operation.
5459  if (VecIn1.getNode()) {
5460    SmallVector<int, 8> Mask;
5461    for (unsigned i = 0; i != NumInScalars; ++i) {
5462      if (N->getOperand(i).getOpcode() == ISD::UNDEF) {
5463        Mask.push_back(-1);
5464        continue;
5465      }
5466
5467      // If extracting from the first vector, just use the index directly.
5468      SDValue Extract = N->getOperand(i);
5469      SDValue ExtVal = Extract.getOperand(1);
5470      if (Extract.getOperand(0) == VecIn1) {
5471        unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
5472        if (ExtIndex > VT.getVectorNumElements())
5473          return SDValue();
5474
5475        Mask.push_back(ExtIndex);
5476        continue;
5477      }
5478
5479      // Otherwise, use InIdx + VecSize
5480      unsigned Idx = cast<ConstantSDNode>(ExtVal)->getZExtValue();
5481      Mask.push_back(Idx+NumInScalars);
5482    }
5483
5484    // If the type is illegal and types have already been legalized, bail out.
5485    if (!TLI.isTypeLegal(VT) && LegalTypes)
5486      return SDValue();
5487
5488    // Return the new VECTOR_SHUFFLE node.
5489    SDValue Ops[2];
5490    Ops[0] = VecIn1;
5491    Ops[1] = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
5492    return DAG.getVectorShuffle(VT, N->getDebugLoc(), Ops[0], Ops[1], &Mask[0]);
5493  }
5494
5495  return SDValue();
5496}
5497
5498SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
5499  // TODO: Check to see if this is a CONCAT_VECTORS of a bunch of
5500  // EXTRACT_SUBVECTOR operations.  If so, and if the EXTRACT_SUBVECTOR vector
5501  // inputs come from at most two distinct vectors, turn this into a shuffle
5502  // node.
5503
5504  // If we only have one input vector, we don't need to do any concatenation.
5505  if (N->getNumOperands() == 1)
5506    return N->getOperand(0);
5507
5508  return SDValue();
5509}
5510
5511SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
5514  EVT VT = N->getValueType(0);
5515  unsigned NumElts = VT.getVectorNumElements();
5516
5517  SDValue N0 = N->getOperand(0);
5518
5519  assert(N0.getValueType().getVectorNumElements() == NumElts &&
5520        "Vector shuffle must be normalized in DAG");
5521
5522  // FIXME: implement canonicalizations from DAG.getVectorShuffle()
5523
5524  // If it is a splat, check if the argument vector is a build_vector with
5525  // all scalar elements the same.
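  // For example, a splat shuffle of (build_vector X, X, X, X) can simply be
  // replaced by that build_vector, since every lane already holds X.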
5526  if (cast<ShuffleVectorSDNode>(N)->isSplat()) {
5527    SDNode *V = N0.getNode();
5528
5529
5531    // not the number of vector elements, look through it.  Be careful not to
5532    // look though conversions that change things like v4f32 to v2f64.
5533    if (V->getOpcode() == ISD::BIT_CONVERT) {
5534      SDValue ConvInput = V->getOperand(0);
5535      if (ConvInput.getValueType().isVector() &&
5536          ConvInput.getValueType().getVectorNumElements() == NumElts)
5537        V = ConvInput.getNode();
5538    }
5539
5540    if (V->getOpcode() == ISD::BUILD_VECTOR) {
5541      unsigned NumElems = V->getNumOperands();
5542      unsigned BaseIdx = cast<ShuffleVectorSDNode>(N)->getSplatIndex();
5543      if (NumElems > BaseIdx) {
5544        SDValue Base;
5545        bool AllSame = true;
5546        for (unsigned i = 0; i != NumElems; ++i) {
5547          if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
5548            Base = V->getOperand(i);
5549            break;
5550          }
5551        }
5552        // Splat of <u, u, u, u>, return <u, u, u, u>
5553        if (!Base.getNode())
5554          return N0;
5555        for (unsigned i = 0; i != NumElems; ++i) {
5556          if (V->getOperand(i) != Base) {
5557            AllSame = false;
5558            break;
5559          }
5560        }
5561        // Splat of <x, x, x, x>, return <x, x, x, x>
5562        if (AllSame)
5563          return N0;
5564      }
5565    }
5566  }
5567  return SDValue();
5568}
5569
5570/// XformToShuffleWithZero - Returns a vector_shuffle if it is able to turn
5571/// an AND into a vector_shuffle with the destination vector and a zero vector.
5572/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
5573///      vector_shuffle V, Zero, <0, 4, 2, 4>
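/// In the shuffle mask, indices 0..N-1 select elements of V and indices
/// N..2N-1 select elements of the zero vector, so <0, 4, 2, 4> keeps lanes 0
/// and 2 of a 4-element V and zeroes lanes 1 and 3.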
5574SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
5575  EVT VT = N->getValueType(0);
5576  DebugLoc dl = N->getDebugLoc();
5577  SDValue LHS = N->getOperand(0);
5578  SDValue RHS = N->getOperand(1);
5579  if (N->getOpcode() == ISD::AND) {
5580    if (RHS.getOpcode() == ISD::BIT_CONVERT)
5581      RHS = RHS.getOperand(0);
5582    if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
5583      SmallVector<int, 8> Indices;
5584      unsigned NumElts = RHS.getNumOperands();
5585      for (unsigned i = 0; i != NumElts; ++i) {
5586        SDValue Elt = RHS.getOperand(i);
5587        if (!isa<ConstantSDNode>(Elt))
5588          return SDValue();
5589        else if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
5590          Indices.push_back(i);
5591        else if (cast<ConstantSDNode>(Elt)->isNullValue())
5592          Indices.push_back(NumElts);
5593        else
5594          return SDValue();
5595      }
5596
5597      // Let's see if the target supports this vector_shuffle.
5598      EVT RVT = RHS.getValueType();
5599      if (!TLI.isVectorClearMaskLegal(Indices, RVT))
5600        return SDValue();
5601
5602      // Return the new VECTOR_SHUFFLE node.
5603      EVT EltVT = RVT.getVectorElementType();
5604      SmallVector<SDValue,8> ZeroOps(RVT.getVectorNumElements(),
5605                                     DAG.getConstant(0, EltVT));
5606      SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
5607                                 RVT, &ZeroOps[0], ZeroOps.size());
5608      LHS = DAG.getNode(ISD::BIT_CONVERT, dl, RVT, LHS);
5609      SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
5610      return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Shuf);
5611    }
5612  }
5613
5614  return SDValue();
5615}
5616
5617/// SimplifyVBinOp - Visit a binary vector operation, like ADD.
5618SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
5619  // After legalize, the target may be depending on adds and other
5620  // binary ops to provide legal ways to construct constants or other
5621  // things. Simplifying them may result in a loss of legality.
5622  if (LegalOperations) return SDValue();
5623
5624  EVT VT = N->getValueType(0);
5625  assert(VT.isVector() && "SimplifyVBinOp only works on vectors!");
5626
5627  EVT EltType = VT.getVectorElementType();
5628  SDValue LHS = N->getOperand(0);
5629  SDValue RHS = N->getOperand(1);
5630  SDValue Shuffle = XformToShuffleWithZero(N);
5631  if (Shuffle.getNode()) return Shuffle;
5632
5633  // If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold
5634  // this operation.
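  // For example, (add (build_vector 1, 2), (build_vector 3, 4)) folds to
  // (build_vector 4, 6).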
5635  if (LHS.getOpcode() == ISD::BUILD_VECTOR &&
5636      RHS.getOpcode() == ISD::BUILD_VECTOR) {
5637    SmallVector<SDValue, 8> Ops;
5638    for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
5639      SDValue LHSOp = LHS.getOperand(i);
5640      SDValue RHSOp = RHS.getOperand(i);
5641      // If these two elements can't be folded, bail out.
5642      if ((LHSOp.getOpcode() != ISD::UNDEF &&
5643           LHSOp.getOpcode() != ISD::Constant &&
5644           LHSOp.getOpcode() != ISD::ConstantFP) ||
5645          (RHSOp.getOpcode() != ISD::UNDEF &&
5646           RHSOp.getOpcode() != ISD::Constant &&
5647           RHSOp.getOpcode() != ISD::ConstantFP))
5648        break;
5649
5650      // Can't fold divide by zero.
5651      if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV ||
5652          N->getOpcode() == ISD::FDIV) {
5653        if ((RHSOp.getOpcode() == ISD::Constant &&
5654             cast<ConstantSDNode>(RHSOp.getNode())->isNullValue()) ||
5655            (RHSOp.getOpcode() == ISD::ConstantFP &&
5656             cast<ConstantFPSDNode>(RHSOp.getNode())->getValueAPF().isZero()))
5657          break;
5658      }
5659
5660      Ops.push_back(DAG.getNode(N->getOpcode(), LHS.getDebugLoc(),
5661                                EltType, LHSOp, RHSOp));
5662      AddToWorkList(Ops.back().getNode());
5663      assert((Ops.back().getOpcode() == ISD::UNDEF ||
5664              Ops.back().getOpcode() == ISD::Constant ||
5665              Ops.back().getOpcode() == ISD::ConstantFP) &&
5666             "Scalar binop didn't fold!");
5667    }
5668
5669    if (Ops.size() == LHS.getNumOperands()) {
5670      EVT VT = LHS.getValueType();
5671      return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
5672                         &Ops[0], Ops.size());
5673    }
5674  }
5675
5676  return SDValue();
5677}
5678
5679SDValue DAGCombiner::SimplifySelect(DebugLoc DL, SDValue N0,
5680                                    SDValue N1, SDValue N2){
5681  assert(N0.getOpcode() == ISD::SETCC && "First argument must be a SetCC node!");
5682
5683  SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
5684                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());
5685
5686  // If we got a simplified select_cc node back from SimplifySelectCC, then
5687  // break it down into a new SETCC node, and a new SELECT node, and then return
5688  // the SELECT node, since we were called with a SELECT node.
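  // That is, a returned (select_cc lhs, rhs, tv, fv, cc) is rebuilt here as
  // (select (setcc lhs, rhs, cc), tv, fv).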
5689  if (SCC.getNode()) {
5690    // Check to see if we got a select_cc back (to turn into setcc/select).
5691    // Otherwise, just return whatever node we got back, like fabs.
5692    if (SCC.getOpcode() == ISD::SELECT_CC) {
5693      SDValue SETCC = DAG.getNode(ISD::SETCC, N0.getDebugLoc(),
5694                                  N0.getValueType(),
5695                                  SCC.getOperand(0), SCC.getOperand(1),
5696                                  SCC.getOperand(4));
5697      AddToWorkList(SETCC.getNode());
5698      return DAG.getNode(ISD::SELECT, SCC.getDebugLoc(), SCC.getValueType(),
5699                         SETCC, SCC.getOperand(2), SCC.getOperand(3));
5700    }
5701
5702    return SCC;
5703  }
5704  return SDValue();
5705}
5706
5707/// SimplifySelectOps - Given a SELECT or a SELECT_CC node, where LHS and RHS
5708/// are the two values being selected between, see if we can simplify the
5709/// select.  Callers of this should assume that TheSelect is deleted if this
5710/// returns true.  As such, they should return the appropriate thing (e.g. the
5711/// node) back to the top-level of the DAG combiner loop to avoid it being
5712/// looked at.
5713bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
5714                                    SDValue RHS) {
5715
5716  // If this is a select from two identical things, try to pull the operation
5717  // through the select.
5718  if (LHS.getOpcode() == RHS.getOpcode() && LHS.hasOneUse() && RHS.hasOneUse()){
5719    // If this is a load and the token chain is identical, replace the select
5720    // of two loads with a load through a select of the address to load from.
5721    // This triggers in things like "select bool X, 10.0, 123.0" after the FP
5722    // constants have been dropped into the constant pool.
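    // For example, (select Cond, (load AddrA), (load AddrB)) with a common
    // chain becomes (load (select Cond, AddrA, AddrB)).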
5723    if (LHS.getOpcode() == ISD::LOAD &&
5724        // Do not let this transformation reduce the number of volatile loads.
5725        !cast<LoadSDNode>(LHS)->isVolatile() &&
5726        !cast<LoadSDNode>(RHS)->isVolatile() &&
5727        // Token chains must be identical.
5728        LHS.getOperand(0) == RHS.getOperand(0)) {
5729      LoadSDNode *LLD = cast<LoadSDNode>(LHS);
5730      LoadSDNode *RLD = cast<LoadSDNode>(RHS);
5731
5732      // If this is an EXTLOAD, the VTs must match.
5733      if (LLD->getMemoryVT() == RLD->getMemoryVT()) {
5734        // FIXME: this conflates two src values, discarding one.  This is not
5735        // the right thing to do, but nothing uses srcvalues now.  When they do,
5736        // turn SrcValue into a list of locations.
5737        SDValue Addr;
5738        if (TheSelect->getOpcode() == ISD::SELECT) {
5739          // Check that the condition doesn't reach either load.  If so, folding
5740          // this will induce a cycle into the DAG.
5741          if (!LLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
5742              !RLD->isPredecessorOf(TheSelect->getOperand(0).getNode())) {
5743            Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
5744                               LLD->getBasePtr().getValueType(),
5745                               TheSelect->getOperand(0), LLD->getBasePtr(),
5746                               RLD->getBasePtr());
5747          }
5748        } else {
5749          // Check that the condition doesn't reach either load.  If so, folding
5750          // this will induce a cycle into the DAG.
5751          if (!LLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
5752              !RLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
5753              !LLD->isPredecessorOf(TheSelect->getOperand(1).getNode()) &&
5754              !RLD->isPredecessorOf(TheSelect->getOperand(1).getNode())) {
5755            Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
5756                               LLD->getBasePtr().getValueType(),
5757                               TheSelect->getOperand(0),
5758                               TheSelect->getOperand(1),
5759                               LLD->getBasePtr(), RLD->getBasePtr(),
5760                               TheSelect->getOperand(4));
5761          }
5762        }
5763
5764        if (Addr.getNode()) {
5765          SDValue Load;
5766          if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
5767            Load = DAG.getLoad(TheSelect->getValueType(0),
5768                               TheSelect->getDebugLoc(),
5769                               LLD->getChain(),
5770                               Addr,LLD->getSrcValue(),
5771                               LLD->getSrcValueOffset(),
5772                               LLD->isVolatile(),
5773                               LLD->getAlignment());
5774          } else {
5775            Load = DAG.getExtLoad(LLD->getExtensionType(),
5776                                  TheSelect->getDebugLoc(),
5777                                  TheSelect->getValueType(0),
5778                                  LLD->getChain(), Addr, LLD->getSrcValue(),
5779                                  LLD->getSrcValueOffset(),
5780                                  LLD->getMemoryVT(),
5781                                  LLD->isVolatile(),
5782                                  LLD->getAlignment());
5783          }
5784
5785          // Users of the select now use the result of the load.
5786          CombineTo(TheSelect, Load);
5787
5788          // Users of the old loads now use the new load's chain.  We know the
5789          // old-load value is dead now.
5790          CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
5791          CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
5792          return true;
5793        }
5794      }
5795    }
5796  }
5797
5798  return false;
5799}
5800
5801/// SimplifySelectCC - Simplify an expression of the form (N0 cond N1) ? N2 : N3
5802/// where 'cond' is the comparison specified by CC.
5803SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
5804                                      SDValue N2, SDValue N3,
5805                                      ISD::CondCode CC, bool NotExtCompare) {
5806  // (x ? y : y) -> y.
5807  if (N2 == N3) return N2;
5808
5809  EVT VT = N2.getValueType();
5810  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
5811  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
5812  ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.getNode());
5813
5814  // Determine if the condition we're dealing with is constant
5815  SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()),
5816                              N0, N1, CC, DL, false);
5817  if (SCC.getNode()) AddToWorkList(SCC.getNode());
5818  ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode());
5819
5820  // fold select_cc true, x, y -> x
5821  if (SCCC && !SCCC->isNullValue())
5822    return N2;
5823  // fold select_cc false, x, y -> y
5824  if (SCCC && SCCC->isNullValue())
5825    return N3;
5826
5827  // Check to see if we can simplify the select into an fabs node
5828  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
5829    // Allow either -0.0 or 0.0
5830    if (CFP->getValueAPF().isZero()) {
5831      // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
5832      if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
5833          N0 == N2 && N3.getOpcode() == ISD::FNEG &&
5834          N2 == N3.getOperand(0))
5835        return DAG.getNode(ISD::FABS, DL, VT, N0);
5836
5837      // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
5838      if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
5839          N0 == N3 && N2.getOpcode() == ISD::FNEG &&
5840          N2.getOperand(0) == N3)
5841        return DAG.getNode(ISD::FABS, DL, VT, N3);
5842    }
5843  }
5844
5845  // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4))"
5846  // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
5847  // in it.  This is a win when the constant is not otherwise available because
5848  // it replaces two constant pool loads with one.  We only do this if the FP
5849  // type is known to be legal, because if it isn't, then we are before legalize
5850  // types and we want the other legalization to happen first (e.g. to avoid
5851  // messing with soft float) and if the ConstantFP is not legal, because if
5852  // it is legal, we may not need to store the FP constant in a constant pool.
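  // Concretely, the two constants are emitted as a two-element constant-pool
  // array {FV, TV}, and the select chooses between offset 0 (false) and the
  // element size (true) when computing the load address.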
5853  if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2))
5854    if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
5855      if (TLI.isTypeLegal(N2.getValueType()) &&
5856          (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
5857           TargetLowering::Legal) &&
5858          // If both constants have multiple uses, then we won't need to do an
5859          // extra load, they are likely around in registers for other users.
5860          (TV->hasOneUse() || FV->hasOneUse())) {
5861        Constant *Elts[] = {
5862          const_cast<ConstantFP*>(FV->getConstantFPValue()),
5863          const_cast<ConstantFP*>(TV->getConstantFPValue())
5864        };
5865        const Type *FPTy = Elts[0]->getType();
5866        const TargetData &TD = *TLI.getTargetData();
5867
5868        // Create a ConstantArray of the two constants.
5869        Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts, 2);
5870        SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
5871                                            TD.getPrefTypeAlignment(FPTy));
5872        unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
5873
5874        // Get the offsets to the 0 and 1 element of the array so that we can
5875        // select between them.
5876        SDValue Zero = DAG.getIntPtrConstant(0);
5877        unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
5878        SDValue One = DAG.getIntPtrConstant(EltSize);
5879
5880        SDValue Cond = DAG.getSetCC(DL,
5881                                    TLI.getSetCCResultType(N0.getValueType()),
5882                                    N0, N1, CC);
5883        SDValue CstOffset = DAG.getNode(ISD::SELECT, DL, Zero.getValueType(),
5884                                        Cond, One, Zero);
5885        CPIdx = DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), CPIdx,
5886                            CstOffset);
5887        return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
5888                           PseudoSourceValue::getConstantPool(), 0, false,
5889                           Alignment);
5890
5891      }
5892    }
5893
5894  // Check to see if we can perform the "gzip trick", transforming
5895  // (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1)), A)
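  // For an i32 X this produces (and (sra X, 31), A): the arithmetic shift
  // yields all ones when X is negative and all zeros otherwise.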
5896  if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
5897      N0.getValueType().isInteger() &&
5898      N2.getValueType().isInteger() &&
5899      (N1C->isNullValue() ||                         // (a < 0) ? b : 0
5900       (N1C->getAPIntValue() == 1 && N0 == N2))) {   // (a < 1) ? a : 0
5901    EVT XType = N0.getValueType();
5902    EVT AType = N2.getValueType();
5903    if (XType.bitsGE(AType)) {
5904      // (and (sra X, size(X)-1), A) -> "and (srl X, C2), A" iff A is a
5905      // single-bit constant.
5906      if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) {
5907        unsigned ShCtV = N2C->getAPIntValue().logBase2();
5908        ShCtV = XType.getSizeInBits()-ShCtV-1;
5909        SDValue ShCt = DAG.getConstant(ShCtV, getShiftAmountTy());
5910        SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(),
5911                                    XType, N0, ShCt);
5912        AddToWorkList(Shift.getNode());
5913
5914        if (XType.bitsGT(AType)) {
5915          Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
5916          AddToWorkList(Shift.getNode());
5917        }
5918
5919        return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
5920      }
5921
5922      SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(),
5923                                  XType, N0,
5924                                  DAG.getConstant(XType.getSizeInBits()-1,
5925                                                  getShiftAmountTy()));
5926      AddToWorkList(Shift.getNode());
5927
5928      if (XType.bitsGT(AType)) {
5929        Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
5930        AddToWorkList(Shift.getNode());
5931      }
5932
5933      return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
5934    }
5935  }
5936
5937  // fold select C, 16, 0 -> shl C, 4
5938  if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
5939      TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent) {
5940
5941    // If the caller doesn't want us to simplify this into a zext of a compare,
5942    // don't do it.
5943    if (NotExtCompare && N2C->getAPIntValue() == 1)
5944      return SDValue();
5945
5946    // Get a SetCC of the condition
5947    // FIXME: Should probably make sure that setcc is legal if we ever have a
5948    // target where it isn't.
5949    SDValue Temp, SCC;
5950    // cast from setcc result type to select result type
5951    if (LegalTypes) {
5952      SCC  = DAG.getSetCC(DL, TLI.getSetCCResultType(N0.getValueType()),
5953                          N0, N1, CC);
5954      if (N2.getValueType().bitsLT(SCC.getValueType()))
5955        Temp = DAG.getZeroExtendInReg(SCC, N2.getDebugLoc(), N2.getValueType());
5956      else
5957        Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
5958                           N2.getValueType(), SCC);
5959    } else {
5960      SCC  = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC);
5961      Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
5962                         N2.getValueType(), SCC);
5963    }
5964
5965    AddToWorkList(SCC.getNode());
5966    AddToWorkList(Temp.getNode());
5967
5968    if (N2C->getAPIntValue() == 1)
5969      return Temp;
5970
5971    // shl setcc result by log2 n2c
5972    return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
5973                       DAG.getConstant(N2C->getAPIntValue().logBase2(),
5974                                       getShiftAmountTy()));
5975  }
5976
5977  // Check to see if this is the equivalent of setcc
5978  // FIXME: Turn all of these into setcc if setcc is legal;
5979  // otherwise, go ahead with the folds.
5980  if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) {
5981    EVT XType = N0.getValueType();
5982    if (!LegalOperations ||
5983        TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(XType))) {
5984      SDValue Res = DAG.getSetCC(DL, TLI.getSetCCResultType(XType), N0, N1, CC);
5985      if (Res.getValueType() != VT)
5986        Res = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Res);
5987      return Res;
5988    }
5989
5990    // fold (seteq X, 0) -> (srl (ctlz X, log2(size(X))))
5991    if (N1C && N1C->isNullValue() && CC == ISD::SETEQ &&
5992        (!LegalOperations ||
5993         TLI.isOperationLegal(ISD::CTLZ, XType))) {
5994      SDValue Ctlz = DAG.getNode(ISD::CTLZ, N0.getDebugLoc(), XType, N0);
5995      return DAG.getNode(ISD::SRL, DL, XType, Ctlz,
5996                         DAG.getConstant(Log2_32(XType.getSizeInBits()),
5997                                         getShiftAmountTy()));
5998    }
5999    // fold (setgt X, 0) -> (srl (and (-X, ~X), size(X)-1))
6000    if (N1C && N1C->isNullValue() && CC == ISD::SETGT) {
6001      SDValue NegN0 = DAG.getNode(ISD::SUB, N0.getDebugLoc(),
6002                                  XType, DAG.getConstant(0, XType), N0);
6003      SDValue NotN0 = DAG.getNOT(N0.getDebugLoc(), N0, XType);
6004      return DAG.getNode(ISD::SRL, DL, XType,
6005                         DAG.getNode(ISD::AND, DL, XType, NegN0, NotN0),
6006                         DAG.getConstant(XType.getSizeInBits()-1,
6007                                         getShiftAmountTy()));
6008    }
6009    // fold (setgt X, -1) -> (xor (srl (X, size(X)-1), 1))
6010    if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
6011      SDValue Sign = DAG.getNode(ISD::SRL, N0.getDebugLoc(), XType, N0,
6012                                 DAG.getConstant(XType.getSizeInBits()-1,
6013                                                 getShiftAmountTy()));
6014      return DAG.getNode(ISD::XOR, DL, XType, Sign, DAG.getConstant(1, XType));
6015    }
6016  }
6017
6018  // Check to see if this is an integer abs. select_cc setl[te] X, 0, -X, X ->
6019  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
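  // For i32 this is the classic branchless abs: Y = X >> 31 (arithmetic), and
  // (X + Y) ^ Y negates X exactly when X is negative.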
6020  if (N1C && N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE) &&
6021      N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) &&
6022      N2.getOperand(0) == N1 && N0.getValueType().isInteger()) {
6023    EVT XType = N0.getValueType();
6024    SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType, N0,
6025                                DAG.getConstant(XType.getSizeInBits()-1,
6026                                                getShiftAmountTy()));
6027    SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(), XType,
6028                              N0, Shift);
6029    AddToWorkList(Shift.getNode());
6030    AddToWorkList(Add.getNode());
6031    return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
6032  }
6033  // Check to see if this is an integer abs. select_cc setgt X, -1, X, -X ->
6034  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
6035  if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT &&
6036      N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) {
6037    if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) {
6038      EVT XType = N0.getValueType();
6039      if (SubC->isNullValue() && XType.isInteger()) {
6040        SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
6041                                    N0,
6042                                    DAG.getConstant(XType.getSizeInBits()-1,
6043                                                    getShiftAmountTy()));
6044        SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
6045                                  XType, N0, Shift);
6046        AddToWorkList(Shift.getNode());
6047        AddToWorkList(Add.getNode());
6048        return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
6049      }
6050    }
6051  }
6052
6053  return SDValue();
6054}
6055
6056/// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC.
6057SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0,
6058                                   SDValue N1, ISD::CondCode Cond,
6059                                   DebugLoc DL, bool foldBooleans) {
6060  TargetLowering::DAGCombinerInfo
6061    DagCombineInfo(DAG, !LegalTypes, !LegalOperations, false, this);
6062  return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
6063}
6064
6065/// BuildSDIVSequence - Given an ISD::SDIV node expressing a divide by constant,
6066/// return a DAG expression to select that will generate the same value by
6067/// multiplying by a magic number.  See:
6068/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
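/// For example, a signed i32 divide by 3 is typically lowered to a
/// multiply-high by the magic constant 0x55555556 followed by a shift/add
/// fixup for the sign; the exact sequence is chosen by TLI.BuildSDIV.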
6069SDValue DAGCombiner::BuildSDIV(SDNode *N) {
6070  std::vector<SDNode*> Built;
6071  SDValue S = TLI.BuildSDIV(N, DAG, &Built);
6072
6073  for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
6074       ii != ee; ++ii)
6075    AddToWorkList(*ii);
6076  return S;
6077}
6078
6079/// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant,
6080/// return a DAG expression to select that will generate the same value by
6081/// multiplying by a magic number.  See:
6082/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
6083SDValue DAGCombiner::BuildUDIV(SDNode *N) {
6084  std::vector<SDNode*> Built;
6085  SDValue S = TLI.BuildUDIV(N, DAG, &Built);
6086
6087  for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
6088       ii != ee; ++ii)
6089    AddToWorkList(*ii);
6090  return S;
6091}
6092
6093/// FindBaseOffset - Return true if base is a frame index, which is known not
6094/// to alias with anything but itself.  Provides base object and offset as results.
6095static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
6096                           GlobalValue *&GV, void *&CV) {
6097  // Assume it is a primitive operation.
6098  Base = Ptr; Offset = 0; GV = 0; CV = 0;
6099
6100  // If it's adding a simple constant, then integrate the offset.
6101  if (Base.getOpcode() == ISD::ADD) {
6102    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
6103      Base = Base.getOperand(0);
6104      Offset += C->getZExtValue();
6105    }
6106  }
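  // For example, a pointer of the form (add FrameIndex#1, 16) yields
  // Base = FrameIndex#1 and Offset = 16.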
6107
6108  // Return the underlying GlobalValue, and update the Offset.  Return false
6109  // for GlobalAddressSDNode since the same GlobalAddress may be represented
6110  // by multiple nodes with different offsets.
6111  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) {
6112    GV = G->getGlobal();
6113    Offset += G->getOffset();
6114    return false;
6115  }
6116
6117  // Return the underlying Constant value, and update the Offset.  Return false
6118  // for ConstantSDNodes since the same constant pool entry may be represented
6119  // by multiple nodes with different offsets.
6120  if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
6121    CV = C->isMachineConstantPoolEntry() ? (void *)C->getMachineCPVal()
6122                                         : (void *)C->getConstVal();
6123    Offset += C->getOffset();
6124    return false;
6125  }
6126  // Only a frame index is known not to alias with anything but itself.
6127  return isa<FrameIndexSDNode>(Base);
6128}
6129
6130/// isAlias - Return true if there is any possibility that the two addresses
6131/// overlap.
6132bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
6133                          const Value *SrcValue1, int SrcValueOffset1,
6134                          unsigned SrcValueAlign1,
6135                          SDValue Ptr2, int64_t Size2,
6136                          const Value *SrcValue2, int SrcValueOffset2,
6137                          unsigned SrcValueAlign2) const {
6138  // If they are the same then they must be aliases.
6139  if (Ptr1 == Ptr2) return true;
6140
6141  // Gather base node and offset information.
6142  SDValue Base1, Base2;
6143  int64_t Offset1, Offset2;
6144  GlobalValue *GV1, *GV2;
6145  void *CV1, *CV2;
6146  bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1);
6147  bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2);
6148
6149  // If they have the same base address, then check to see if they overlap.
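  // For example, accesses of four bytes each at Base+0 and Base+8 cannot
  // overlap, because 0 + 4 <= 8.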
6150  if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
6151    return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
6152
6153  // If we know what the bases are, and they aren't identical, then we know
6154  // they cannot alias.
6155  if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
6156    return false;
6157
6158  // If we know that both SrcValue1 and SrcValue2 have relatively large
6159  // alignment compared to the size and offset of the access, we may be able
6160  // to prove they do not alias.  This check is conservative for now to catch
6161  // cases created by splitting vector types.
6162  if ((SrcValueAlign1 == SrcValueAlign2) &&
6163      (SrcValueOffset1 != SrcValueOffset2) &&
6164      (Size1 == Size2) && (SrcValueAlign1 > Size1)) {
6165    int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1;
6166    int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1;
6167
6168    // There is no overlap between these relatively aligned accesses of similar
6169    // size, return no alias.
6170    if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1)
6171      return false;
6172  }
6173
6174  if (CombinerGlobalAA) {
6175    // Use alias analysis information.
6176    int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2);
6177    int64_t Overlap1 = Size1 + SrcValueOffset1 - MinOffset;
6178    int64_t Overlap2 = Size2 + SrcValueOffset2 - MinOffset;
6179    AliasAnalysis::AliasResult AAResult =
6180                             AA.alias(SrcValue1, Overlap1, SrcValue2, Overlap2);
6181    if (AAResult == AliasAnalysis::NoAlias)
6182      return false;
6183  }
6184
6185  // Otherwise we have to assume they alias.
6186  return true;
6187}
6188
6189/// FindAliasInfo - Extracts the relevant alias information from the memory
6190/// node.  Returns true if the operand was a load.
6191bool DAGCombiner::FindAliasInfo(SDNode *N,
6192                        SDValue &Ptr, int64_t &Size,
6193                        const Value *&SrcValue,
6194                        int &SrcValueOffset,
6195                        unsigned &SrcValueAlign) const {
6196  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
6197    Ptr = LD->getBasePtr();
6198    Size = LD->getMemoryVT().getSizeInBits() >> 3;
6199    SrcValue = LD->getSrcValue();
6200    SrcValueOffset = LD->getSrcValueOffset();
6201    SrcValueAlign = LD->getOriginalAlignment();
6202    return true;
6203  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
6204    Ptr = ST->getBasePtr();
6205    Size = ST->getMemoryVT().getSizeInBits() >> 3;
6206    SrcValue = ST->getSrcValue();
6207    SrcValueOffset = ST->getSrcValueOffset();
6208    SrcValueAlign = ST->getOriginalAlignment();
6209  } else {
6210    llvm_unreachable("FindAliasInfo expected a memory operand");
6211  }
6212
6213  return false;
6214}
6215
6216/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
6217/// looking for aliasing nodes and adding them to the Aliases vector.
6218void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
6219                                   SmallVector<SDValue, 8> &Aliases) {
6220  SmallVector<SDValue, 8> Chains;     // List of chains to visit.
6221  SmallPtrSet<SDNode *, 16> Visited;  // Visited node set.
6222
6223  // Get alias information for node.
6224  SDValue Ptr;
6225  int64_t Size;
6226  const Value *SrcValue;
6227  int SrcValueOffset;
6228  unsigned SrcValueAlign;
6229  bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset,
6230                              SrcValueAlign);
6231
6232  // Starting off.
6233  Chains.push_back(OriginalChain);
6234  unsigned Depth = 0;
6235
6236  // Look at each chain and determine if it is an alias.  If so, add it to the
6237  // aliases list.  If not, then continue up the chain looking for the next
6238  // candidate.
6239  while (!Chains.empty()) {
6240    SDValue Chain = Chains.back();
6241    Chains.pop_back();
6242
6243    // For TokenFactor nodes, look at each operand and only continue up the
6244    // chain until we find two aliases.  If we've seen two aliases, assume we'll
6245    // find more and revert to original chain since the xform is unlikely to be
6246    // profitable.
6247    //
6248    // FIXME: The depth check could be made to return the last non-aliasing
6249    // chain we found before we hit a tokenfactor rather than the original
6250    // chain.
6251    if (Depth > 6 || Aliases.size() == 2) {
6252      Aliases.clear();
6253      Aliases.push_back(OriginalChain);
6254      break;
6255    }
6256
6257    // Don't bother if we've been before.
6258    if (!Visited.insert(Chain.getNode()))
6259      continue;
6260
6261    switch (Chain.getOpcode()) {
6262    case ISD::EntryToken:
6263      // Entry token is ideal chain operand, but handled in FindBetterChain.
6264      break;
6265
6266    case ISD::LOAD:
6267    case ISD::STORE: {
6268      // Get alias information for Chain.
6269      SDValue OpPtr;
6270      int64_t OpSize;
6271      const Value *OpSrcValue;
6272      int OpSrcValueOffset;
6273      unsigned OpSrcValueAlign;
6274      bool IsOpLoad = FindAliasInfo(Chain.getNode(), OpPtr, OpSize,
6275                                    OpSrcValue, OpSrcValueOffset,
6276                                    OpSrcValueAlign);
6277
6278      // If the chain may alias the original operation, record it and stop here.
6279      if (!(IsLoad && IsOpLoad) &&
6280          isAlias(Ptr, Size, SrcValue, SrcValueOffset, SrcValueAlign,
6281                  OpPtr, OpSize, OpSrcValue, OpSrcValueOffset,
6282                  OpSrcValueAlign)) {
6283        Aliases.push_back(Chain);
6284      } else {
6285        // Look further up the chain.
6286        Chains.push_back(Chain.getOperand(0));
6287        ++Depth;
6288      }
6289      break;
6290    }
6291
6292    case ISD::TokenFactor:
6293      // We have to check each of the operands of the token factor for "small"
6294      // token factors, so we queue them up.  Adding the operands to the queue
6295      // (stack) in reverse order maintains the original order and increases the
6296      // likelihood that getNode will find a matching token factor (CSE).
6297      if (Chain.getNumOperands() > 16) {
6298        Aliases.push_back(Chain);
6299        break;
6300      }
6301      for (unsigned n = Chain.getNumOperands(); n;)
6302        Chains.push_back(Chain.getOperand(--n));
6303      ++Depth;
6304      break;
6305
6306    default:
6307      // For all other instructions we will just have to take what we can get.
6308      Aliases.push_back(Chain);
6309      break;
6310    }
6311  }
6312}
6313
6314/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking
6315/// for a better chain (aliasing node.)
6316SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
6317  SmallVector<SDValue, 8> Aliases;  // Ops for replacing token factor.
6318
6319  // Accumulate all the aliases to this node.
6320  GatherAllAliases(N, OldChain, Aliases);
6321
6322  if (Aliases.size() == 0) {
6323    // If no operands then chain to entry token.
6324    return DAG.getEntryNode();
6325  } else if (Aliases.size() == 1) {
6326    // If a single operand then chain to it.  We don't need to revisit it.
6327    return Aliases[0];
6328  }
6329
6330  // Construct a custom tailored token factor.
6331  return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
6332                     &Aliases[0], Aliases.size());
6333}
6334
6335// SelectionDAG::Combine - This is the entry point for the file.
6336//
6337void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA,
6338                           CodeGenOpt::Level OptLevel) {
6339  // Construct a DAGCombiner instance and run it over the whole DAG at the
6340  // given combine level.
6341  DAGCombiner(*this, AA, OptLevel).Run(Level);
6342}
6343