DAGCombiner.cpp revision 55b2b9d20c04019c67efeb9fcc7045b11f8e23b3
1//===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass combines dag nodes to form fewer, simpler DAG nodes.  It can be run
11// both before and after the DAG is legalized.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "dagcombine"
16#include "llvm/CodeGen/SelectionDAG.h"
17#include "llvm/CodeGen/MachineFunction.h"
18#include "llvm/CodeGen/MachineFrameInfo.h"
19#include "llvm/Analysis/AliasAnalysis.h"
20#include "llvm/Target/TargetData.h"
21#include "llvm/Target/TargetFrameInfo.h"
22#include "llvm/Target/TargetLowering.h"
23#include "llvm/Target/TargetMachine.h"
24#include "llvm/Target/TargetOptions.h"
25#include "llvm/ADT/SmallPtrSet.h"
26#include "llvm/ADT/Statistic.h"
27#include "llvm/Support/Compiler.h"
28#include "llvm/Support/CommandLine.h"
29#include "llvm/Support/Debug.h"
30#include "llvm/Support/MathExtras.h"
31#include <algorithm>
32#include <set>
33using namespace llvm;
34
35STATISTIC(NodesCombined   , "Number of dag nodes combined");
36STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
37STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
38
39namespace {
40  static cl::opt<bool>
41    CombinerAA("combiner-alias-analysis", cl::Hidden,
42               cl::desc("Turn on alias analysis during testing"));
43
44  static cl::opt<bool>
45    CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
46               cl::desc("Include global information in alias analysis"));
47
48//------------------------------ DAGCombiner ---------------------------------//
49
50  class VISIBILITY_HIDDEN DAGCombiner {
51    SelectionDAG &DAG;
52    const TargetLowering &TLI;
53    CombineLevel Level;
54    bool LegalOperations;
55    bool LegalTypes;
56    bool Fast;
57
58    // Worklist of all of the nodes that need to be simplified.
59    std::vector<SDNode*> WorkList;
60
61    // AA - Used for DAG load/store alias analysis.
62    AliasAnalysis &AA;
63
64    /// AddUsersToWorkList - When an instruction is simplified, add all users of
65    /// the instruction to the work list because they might now be simplified
66    /// further.
67    ///
68    void AddUsersToWorkList(SDNode *N) {
69      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
70           UI != UE; ++UI)
71        AddToWorkList(*UI);
72    }
73
74    /// visit - call the node-specific routine that knows how to fold each
75    /// particular type of node.
76    SDValue visit(SDNode *N);
77
78  public:
79    /// AddToWorkList - Add to the work list, making sure its instance is at
80    /// the back (next to be processed).
81    void AddToWorkList(SDNode *N) {
82      removeFromWorkList(N);
83      WorkList.push_back(N);
84    }
85
86    /// removeFromWorkList - remove all instances of N from the worklist.
87    ///
88    void removeFromWorkList(SDNode *N) {
89      WorkList.erase(std::remove(WorkList.begin(), WorkList.end(), N),
90                     WorkList.end());
91    }
92
93    SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
94                        bool AddTo = true);
95
96    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
97      return CombineTo(N, &Res, 1, AddTo);
98    }
99
100    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
101                        bool AddTo = true) {
102      SDValue To[] = { Res0, Res1 };
103      return CombineTo(N, To, 2, AddTo);
104    }
105
106    void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);
107
108  private:
109
110    /// SimplifyDemandedBits - Check the specified integer node value to see if
111    /// it can be simplified or if things it uses can be simplified by bit
112    /// propagation.  If so, return true.
113    bool SimplifyDemandedBits(SDValue Op) {
114      APInt Demanded = APInt::getAllOnesValue(Op.getValueSizeInBits());
115      return SimplifyDemandedBits(Op, Demanded);
116    }
117
118    bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);
119
120    bool CombineToPreIndexedLoadStore(SDNode *N);
121    bool CombineToPostIndexedLoadStore(SDNode *N);
122
123
124    /// combine - call the node-specific routine that knows how to fold each
125    /// particular type of node. If that doesn't do anything, try the
126    /// target-specific DAG combines.
127    SDValue combine(SDNode *N);
128
129    // Visitation implementation - Implement dag node combining for different
130    // node types.  The semantics are as follows:
131    // Return Value:
132    //   SDValue.getNode() == 0 - No change was made
133    //   SDValue.getNode() == N - N was replaced, is dead and has been handled.
134    //   otherwise              - N should be replaced by the returned Operand.
135    //
136    SDValue visitTokenFactor(SDNode *N);
137    SDValue visitMERGE_VALUES(SDNode *N);
138    SDValue visitADD(SDNode *N);
139    SDValue visitSUB(SDNode *N);
140    SDValue visitADDC(SDNode *N);
141    SDValue visitADDE(SDNode *N);
142    SDValue visitMUL(SDNode *N);
143    SDValue visitSDIV(SDNode *N);
144    SDValue visitUDIV(SDNode *N);
145    SDValue visitSREM(SDNode *N);
146    SDValue visitUREM(SDNode *N);
147    SDValue visitMULHU(SDNode *N);
148    SDValue visitMULHS(SDNode *N);
149    SDValue visitSMUL_LOHI(SDNode *N);
150    SDValue visitUMUL_LOHI(SDNode *N);
151    SDValue visitSDIVREM(SDNode *N);
152    SDValue visitUDIVREM(SDNode *N);
153    SDValue visitAND(SDNode *N);
154    SDValue visitOR(SDNode *N);
155    SDValue visitXOR(SDNode *N);
156    SDValue SimplifyVBinOp(SDNode *N);
157    SDValue visitSHL(SDNode *N);
158    SDValue visitSRA(SDNode *N);
159    SDValue visitSRL(SDNode *N);
160    SDValue visitCTLZ(SDNode *N);
161    SDValue visitCTTZ(SDNode *N);
162    SDValue visitCTPOP(SDNode *N);
163    SDValue visitSELECT(SDNode *N);
164    SDValue visitSELECT_CC(SDNode *N);
165    SDValue visitSETCC(SDNode *N);
166    SDValue visitSIGN_EXTEND(SDNode *N);
167    SDValue visitZERO_EXTEND(SDNode *N);
168    SDValue visitANY_EXTEND(SDNode *N);
169    SDValue visitSIGN_EXTEND_INREG(SDNode *N);
170    SDValue visitTRUNCATE(SDNode *N);
171    SDValue visitBIT_CONVERT(SDNode *N);
172    SDValue visitBUILD_PAIR(SDNode *N);
173    SDValue visitFADD(SDNode *N);
174    SDValue visitFSUB(SDNode *N);
175    SDValue visitFMUL(SDNode *N);
176    SDValue visitFDIV(SDNode *N);
177    SDValue visitFREM(SDNode *N);
178    SDValue visitFCOPYSIGN(SDNode *N);
179    SDValue visitSINT_TO_FP(SDNode *N);
180    SDValue visitUINT_TO_FP(SDNode *N);
181    SDValue visitFP_TO_SINT(SDNode *N);
182    SDValue visitFP_TO_UINT(SDNode *N);
183    SDValue visitFP_ROUND(SDNode *N);
184    SDValue visitFP_ROUND_INREG(SDNode *N);
185    SDValue visitFP_EXTEND(SDNode *N);
186    SDValue visitFNEG(SDNode *N);
187    SDValue visitFABS(SDNode *N);
188    SDValue visitBRCOND(SDNode *N);
189    SDValue visitBR_CC(SDNode *N);
190    SDValue visitLOAD(SDNode *N);
191    SDValue visitSTORE(SDNode *N);
192    SDValue visitINSERT_VECTOR_ELT(SDNode *N);
193    SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
194    SDValue visitBUILD_VECTOR(SDNode *N);
195    SDValue visitCONCAT_VECTORS(SDNode *N);
196    SDValue visitVECTOR_SHUFFLE(SDNode *N);
197
198    SDValue XformToShuffleWithZero(SDNode *N);
199    SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);
200
201    SDValue visitShiftByConstant(SDNode *N, unsigned Amt);
202
203    bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
204    SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
205    SDValue SimplifySelect(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2);
206    SDValue SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2,
207                             SDValue N3, ISD::CondCode CC,
208                             bool NotExtCompare = false);
209    SDValue SimplifySetCC(MVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
210                          bool foldBooleans = true);
211    SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
212                                         unsigned HiOp);
213    SDValue CombineConsecutiveLoads(SDNode *N, MVT VT);
214    SDValue ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, MVT);
215    SDValue BuildSDIV(SDNode *N);
216    SDValue BuildUDIV(SDNode *N);
217    SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
218    SDValue ReduceLoadWidth(SDNode *N);
219
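    /// GetDemandedBits - See if the specified operand can be simplified with the
    /// knowledge that only the bits specified by Mask are used.  If so, return a
    /// simpler operand, otherwise return a null SDValue.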
220    SDValue GetDemandedBits(SDValue V, const APInt &Mask);
221
222    /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
223    /// looking for aliasing nodes and adding them to the Aliases vector.
224    void GatherAllAliases(SDNode *N, SDValue OriginalChain,
225                          SmallVector<SDValue, 8> &Aliases);
226
227    /// isAlias - Return true if there is any possibility that the two addresses
228    /// overlap.
229    bool isAlias(SDValue Ptr1, int64_t Size1,
230                 const Value *SrcValue1, int SrcValueOffset1,
231                 SDValue Ptr2, int64_t Size2,
232                 const Value *SrcValue2, int SrcValueOffset2) const;
233
234    /// FindAliasInfo - Extracts the relevant alias information from the memory
235    /// node.  Returns true if the operand was a load.
236    bool FindAliasInfo(SDNode *N,
237                       SDValue &Ptr, int64_t &Size,
238                       const Value *&SrcValue, int &SrcValueOffset) const;
239
240    /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
241    /// looking for a better chain (aliasing node).
242    SDValue FindBetterChain(SDNode *N, SDValue Chain);
243
244    /// getShiftAmountTy - Returns a type large enough to hold any valid
245    /// shift amount - before type legalization these can be huge.
246    MVT getShiftAmountTy() {
247      return LegalTypes ?  TLI.getShiftAmountTy() : TLI.getPointerTy();
248    }
249
250public:
251    DAGCombiner(SelectionDAG &D, AliasAnalysis &A, bool fast)
252      : DAG(D),
253        TLI(D.getTargetLoweringInfo()),
254        Level(Unrestricted),
255        LegalOperations(false),
256        LegalTypes(false),
257        Fast(fast),
258        AA(A) {}
259
260    /// Run - runs the dag combiner on all nodes in the work list
261    void Run(CombineLevel AtLevel);
262  };
263}
264
265
266namespace {
267/// WorkListRemover - This class is a DAGUpdateListener that removes any deleted
268/// nodes from the worklist.
269class VISIBILITY_HIDDEN WorkListRemover :
270  public SelectionDAG::DAGUpdateListener {
271  DAGCombiner &DC;
272public:
273  explicit WorkListRemover(DAGCombiner &dc) : DC(dc) {}
274
275  virtual void NodeDeleted(SDNode *N, SDNode *E) {
276    DC.removeFromWorkList(N);
277  }
278
279  virtual void NodeUpdated(SDNode *N) {
280    // Ignore updates.
281  }
282};
283}
284
285//===----------------------------------------------------------------------===//
286//  TargetLowering::DAGCombinerInfo implementation
287//===----------------------------------------------------------------------===//
288
289void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
290  ((DAGCombiner*)DC)->AddToWorkList(N);
291}
292
293SDValue TargetLowering::DAGCombinerInfo::
294CombineTo(SDNode *N, const std::vector<SDValue> &To) {
295  return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size());
296}
297
298SDValue TargetLowering::DAGCombinerInfo::
299CombineTo(SDNode *N, SDValue Res) {
300  return ((DAGCombiner*)DC)->CombineTo(N, Res);
301}
302
303
304SDValue TargetLowering::DAGCombinerInfo::
305CombineTo(SDNode *N, SDValue Res0, SDValue Res1) {
306  return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1);
307}
308
309void TargetLowering::DAGCombinerInfo::
310CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
311  return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
312}
313
314//===----------------------------------------------------------------------===//
315// Helper Functions
316//===----------------------------------------------------------------------===//
317
318/// isNegatibleForFree - Return 1 if we can compute the negated form of the
319/// specified expression for the same cost as the expression itself, or 2 if we
320/// can compute the negated form more cheaply than the expression itself; 0 if not.
321static char isNegatibleForFree(SDValue Op, bool LegalOperations,
322                               unsigned Depth = 0) {
323  // No compile time optimizations on this type.
324  if (Op.getValueType() == MVT::ppcf128)
325    return 0;
326
327  // fneg is removable even if it has multiple uses.
328  if (Op.getOpcode() == ISD::FNEG) return 2;
329
330  // Don't allow anything with multiple uses.
331  if (!Op.hasOneUse()) return 0;
332
333  // Don't recurse exponentially.
334  if (Depth > 6) return 0;
335
336  switch (Op.getOpcode()) {
337  default: return 0;
338  case ISD::ConstantFP:
339    // Don't invert constant FP values after legalize.  The negated constant
340    // isn't necessarily legal.
341    return LegalOperations ? 0 : 1;
342  case ISD::FADD:
343    // FIXME: determine better conditions for this xform.
344    if (!UnsafeFPMath) return 0;
345
346    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
347    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
348      return V;
349    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
350    return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1);
351  case ISD::FSUB:
352    // We can't turn -(A-B) into B-A when we honor signed zeros.
353    if (!UnsafeFPMath) return 0;
354
355    // fold (fneg (fsub A, B)) -> (fsub B, A)
356    return 1;
357
358  case ISD::FMUL:
359  case ISD::FDIV:
360    if (HonorSignDependentRoundingFPMath()) return 0;
361
362    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
363    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
364      return V;
365
366    return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1);
367
368  case ISD::FP_EXTEND:
369  case ISD::FP_ROUND:
370  case ISD::FSIN:
371    return isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1);
372  }
373}
374
375/// GetNegatedExpression - If isNegatibleForFree returned a non-zero value, this
376/// function returns the newly negated expression.
377static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
378                                    bool LegalOperations, unsigned Depth = 0) {
379  // fneg is removable even if it has multiple uses.
380  if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);
381
382  // Don't allow anything with multiple uses.
383  assert(Op.hasOneUse() && "Unknown reuse!");
384
385  assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
386  switch (Op.getOpcode()) {
387  default: assert(0 && "Unknown code");
388  case ISD::ConstantFP: {
389    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
390    V.changeSign();
391    return DAG.getConstantFP(V, Op.getValueType());
392  }
393  case ISD::FADD:
394    // FIXME: determine better conditions for this xform.
395    assert(UnsafeFPMath);
396
397    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
398    if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
399      return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
400                         GetNegatedExpression(Op.getOperand(0), DAG,
401                                              LegalOperations, Depth+1),
402                         Op.getOperand(1));
403    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
404    return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
405                       GetNegatedExpression(Op.getOperand(1), DAG,
406                                            LegalOperations, Depth+1),
407                       Op.getOperand(0));
408  case ISD::FSUB:
409    // We can't turn -(A-B) into B-A when we honor signed zeros.
410    assert(UnsafeFPMath);
411
412    // fold (fneg (fsub 0, B)) -> B
413    if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
414      if (N0CFP->getValueAPF().isZero())
415        return Op.getOperand(1);
416
417    // fold (fneg (fsub A, B)) -> (fsub B, A)
418    return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
419                       Op.getOperand(1), Op.getOperand(0));
420
421  case ISD::FMUL:
422  case ISD::FDIV:
423    assert(!HonorSignDependentRoundingFPMath());
424
425    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
426    if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
427      return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
428                         GetNegatedExpression(Op.getOperand(0), DAG,
429                                              LegalOperations, Depth+1),
430                         Op.getOperand(1));
431
432    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
433    return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
434                       Op.getOperand(0),
435                       GetNegatedExpression(Op.getOperand(1), DAG,
436                                            LegalOperations, Depth+1));
437
438  case ISD::FP_EXTEND:
439  case ISD::FSIN:
440    return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
441                       GetNegatedExpression(Op.getOperand(0), DAG,
442                                            LegalOperations, Depth+1));
443  case ISD::FP_ROUND:
444      return DAG.getNode(ISD::FP_ROUND, Op.getDebugLoc(), Op.getValueType(),
445                         GetNegatedExpression(Op.getOperand(0), DAG,
446                                              LegalOperations, Depth+1),
447                         Op.getOperand(1));
448  }
449}
450
451
452// isSetCCEquivalent - Return true if this node is a setcc, or is a select_cc
453// that selects between the values 1 and 0, making it equivalent to a setcc.
454// Also, set the incoming LHS, RHS, and CC references to the appropriate
455// nodes based on the type of node we are checking.  This simplifies life a
456// bit for the callers.
457static bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
458                              SDValue &CC) {
459  if (N.getOpcode() == ISD::SETCC) {
460    LHS = N.getOperand(0);
461    RHS = N.getOperand(1);
462    CC  = N.getOperand(2);
463    return true;
464  }
465  if (N.getOpcode() == ISD::SELECT_CC &&
466      N.getOperand(2).getOpcode() == ISD::Constant &&
467      N.getOperand(3).getOpcode() == ISD::Constant &&
468      cast<ConstantSDNode>(N.getOperand(2))->getAPIntValue() == 1 &&
469      cast<ConstantSDNode>(N.getOperand(3))->isNullValue()) {
470    LHS = N.getOperand(0);
471    RHS = N.getOperand(1);
472    CC  = N.getOperand(4);
473    return true;
474  }
475  return false;
476}
477
478// isOneUseSetCC - Return true if this is a SetCC-equivalent operation with only
479// one use.  If this is true, it allows the users to invert the operation for
480// free when it is profitable to do so.
481static bool isOneUseSetCC(SDValue N) {
482  SDValue N0, N1, N2;
483  if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
484    return true;
485  return false;
486}
487
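/// ReassociateOps - Reassociate commutative/associative operations to expose
/// constant folding, e.g. (op (op x, c1), c2) -> (op x, (op c1, c2)) and
/// (op (op x, c1), y) -> (op (op x, y), c1) when the inner node has one use.
/// Returns a null SDValue if no reassociation applies.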
488SDValue DAGCombiner::ReassociateOps(unsigned Opc, DebugLoc DL,
489                                    SDValue N0, SDValue N1) {
490  MVT VT = N0.getValueType();
491  if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) {
492    if (isa<ConstantSDNode>(N1)) {
493      // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
494      SDValue OpNode =
495        DAG.FoldConstantArithmetic(Opc, VT,
496                                   cast<ConstantSDNode>(N0.getOperand(1)),
497                                   cast<ConstantSDNode>(N1));
498      return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
499    } else if (N0.hasOneUse()) {
500      // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use
501      SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
502                                   N0.getOperand(0), N1);
503      AddToWorkList(OpNode.getNode());
504      return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
505    }
506  }
507
508  if (N1.getOpcode() == Opc && isa<ConstantSDNode>(N1.getOperand(1))) {
509    if (isa<ConstantSDNode>(N0)) {
510      // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
511      SDValue OpNode =
512        DAG.FoldConstantArithmetic(Opc, VT,
513                                   cast<ConstantSDNode>(N1.getOperand(1)),
514                                   cast<ConstantSDNode>(N0));
515      return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
516    } else if (N1.hasOneUse()) {
517      // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one use
518      SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
519                                   N1.getOperand(0), N0);
520      AddToWorkList(OpNode.getNode());
521      return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
522    }
523  }
524
525  return SDValue();
526}
527
528SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
529                               bool AddTo) {
530  assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
531  ++NodesCombined;
532  DOUT << "\nReplacing.1 "; DEBUG(N->dump(&DAG));
533  DOUT << "\nWith: "; DEBUG(To[0].getNode()->dump(&DAG));
534  DOUT << " and " << NumTo-1 << " other values\n";
535  DEBUG(for (unsigned i = 0, e = NumTo; i != e; ++i)
536          assert(N->getValueType(i) == To[i].getValueType() &&
537                 "Cannot combine value to value of different type!"));
538  WorkListRemover DeadNodes(*this);
539  DAG.ReplaceAllUsesWith(N, To, &DeadNodes);
540
541  if (AddTo) {
542    // Push the new nodes and any users onto the worklist
543    for (unsigned i = 0, e = NumTo; i != e; ++i) {
544      AddToWorkList(To[i].getNode());
545      AddUsersToWorkList(To[i].getNode());
546    }
547  }
548
549  // Finally, if the node is now dead, remove it from the graph.  The node
550  // may not be dead if the replacement process recursively simplified to
551  // something else needing this node.
552  if (N->use_empty()) {
553    // Nodes can be reintroduced into the worklist.  Make sure we do not
554    // process a node that has been replaced.
555    removeFromWorkList(N);
556
557    // Finally, since the node is now dead, remove it from the graph.
558    DAG.DeleteNode(N);
559  }
560  return SDValue(N, 0);
561}
562
563void
564DAGCombiner::CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &
565                                                                          TLO) {
566  // Replace all uses.  If any nodes become isomorphic to other nodes and
567  // are deleted, make sure to remove them from our worklist.
568  WorkListRemover DeadNodes(*this);
569  DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New, &DeadNodes);
570
571  // Push the new node and any (possibly new) users onto the worklist.
572  AddToWorkList(TLO.New.getNode());
573  AddUsersToWorkList(TLO.New.getNode());
574
575  // Finally, if the node is now dead, remove it from the graph.  The node
576  // may not be dead if the replacement process recursively simplified to
577  // something else needing this node.
578  if (TLO.Old.getNode()->use_empty()) {
579    removeFromWorkList(TLO.Old.getNode());
580
581    // If the operands of this node are only used by the node, they will now
582    // be dead.  Make sure to visit them first to delete dead nodes early.
583    for (unsigned i = 0, e = TLO.Old.getNode()->getNumOperands(); i != e; ++i)
584      if (TLO.Old.getNode()->getOperand(i).getNode()->hasOneUse())
585        AddToWorkList(TLO.Old.getNode()->getOperand(i).getNode());
586
587    DAG.DeleteNode(TLO.Old.getNode());
588  }
589}
590
591/// SimplifyDemandedBits - Check the specified integer node value to see if
592/// it can be simplified or if things it uses can be simplified by bit
593/// propagation.  If so, return true.
594bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
595  TargetLowering::TargetLoweringOpt TLO(DAG);
596  APInt KnownZero, KnownOne;
597  if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
598    return false;
599
600  // Revisit the node.
601  AddToWorkList(Op.getNode());
602
603  // Replace the old value with the new one.
604  ++NodesCombined;
605  DOUT << "\nReplacing.2 "; DEBUG(TLO.Old.getNode()->dump(&DAG));
606  DOUT << "\nWith: "; DEBUG(TLO.New.getNode()->dump(&DAG));
607  DOUT << '\n';
608
609  CommitTargetLoweringOpt(TLO);
610  return true;
611}
612
613//===----------------------------------------------------------------------===//
614//  Main DAG Combiner implementation
615//===----------------------------------------------------------------------===//
616
617void DAGCombiner::Run(CombineLevel AtLevel) {
618  // Set the instance variables, so that the various visit routines may use them.
619  Level = AtLevel;
620  LegalOperations = Level >= NoIllegalOperations;
621  LegalTypes = Level >= NoIllegalTypes;
622
623  // Add all the dag nodes to the worklist.
624  WorkList.reserve(DAG.allnodes_size());
625  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
626       E = DAG.allnodes_end(); I != E; ++I)
627    WorkList.push_back(I);
628
629  // Create a dummy node (which is not added to allnodes), that adds a reference
630  // to the root node, preventing it from being deleted, and tracking any
631  // changes of the root.
632  HandleSDNode Dummy(DAG.getRoot());
633
634  // The root of the dag may temporarily point at deleted nodes until the dag
635  // combiner is done.  Set it to null to avoid confusion.
636  DAG.setRoot(SDValue());
637
638  // While the worklist isn't empty, inspect the node at the end of it and
639  // try to combine it.
640  while (!WorkList.empty()) {
641    SDNode *N = WorkList.back();
642    WorkList.pop_back();
643
644    // If N has no uses, it is dead.  Make sure to revisit all N's operands once
645    // N is deleted from the DAG, since they too may now be dead or may have a
646    // reduced number of uses, allowing other xforms.
647    if (N->use_empty() && N != &Dummy) {
648      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
649        AddToWorkList(N->getOperand(i).getNode());
650
651      DAG.DeleteNode(N);
652      continue;
653    }
654
655    SDValue RV = combine(N);
656
657    if (RV.getNode() == 0)
658      continue;
659
660    ++NodesCombined;
661
662    // If we get back the same node we passed in, rather than a new node or
663    // zero, we know that the node must have defined multiple values and
664    // CombineTo was used.  Since CombineTo takes care of the worklist
665    // mechanics for us, we have no work to do in this case.
666    if (RV.getNode() == N)
667      continue;
668
669    assert(N->getOpcode() != ISD::DELETED_NODE &&
670           RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
671           "Node was deleted but visit returned new node!");
672
673    DOUT << "\nReplacing.3 "; DEBUG(N->dump(&DAG));
674    DOUT << "\nWith: "; DEBUG(RV.getNode()->dump(&DAG));
675    DOUT << '\n';
676    WorkListRemover DeadNodes(*this);
677    if (N->getNumValues() == RV.getNode()->getNumValues())
678      DAG.ReplaceAllUsesWith(N, RV.getNode(), &DeadNodes);
679    else {
680      assert(N->getValueType(0) == RV.getValueType() &&
681             N->getNumValues() == 1 && "Type mismatch");
682      SDValue OpV = RV;
683      DAG.ReplaceAllUsesWith(N, &OpV, &DeadNodes);
684    }
685
686    // Push the new node and any users onto the worklist
687    AddToWorkList(RV.getNode());
688    AddUsersToWorkList(RV.getNode());
689
690    // Add any uses of the old node to the worklist in case this node is the
691    // last one that uses them.  They may become dead after this node is
692    // deleted.
693    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
694      AddToWorkList(N->getOperand(i).getNode());
695
696    // Finally, if the node is now dead, remove it from the graph.  The node
697    // may not be dead if the replacement process recursively simplified to
698    // something else needing this node.
699    if (N->use_empty()) {
700      // Nodes can be reintroduced into the worklist.  Make sure we do not
701      // process a node that has been replaced.
702      removeFromWorkList(N);
703
704      // Finally, since the node is now dead, remove it from the graph.
705      DAG.DeleteNode(N);
706    }
707  }
708
709  // If the root changed (e.g. it was a dead load), update the root.
710  DAG.setRoot(Dummy.getValue());
711}
712
713SDValue DAGCombiner::visit(SDNode *N) {
714  switch(N->getOpcode()) {
715  default: break;
716  case ISD::TokenFactor:        return visitTokenFactor(N);
717  case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
718  case ISD::ADD:                return visitADD(N);
719  case ISD::SUB:                return visitSUB(N);
720  case ISD::ADDC:               return visitADDC(N);
721  case ISD::ADDE:               return visitADDE(N);
722  case ISD::MUL:                return visitMUL(N);
723  case ISD::SDIV:               return visitSDIV(N);
724  case ISD::UDIV:               return visitUDIV(N);
725  case ISD::SREM:               return visitSREM(N);
726  case ISD::UREM:               return visitUREM(N);
727  case ISD::MULHU:              return visitMULHU(N);
728  case ISD::MULHS:              return visitMULHS(N);
729  case ISD::SMUL_LOHI:          return visitSMUL_LOHI(N);
730  case ISD::UMUL_LOHI:          return visitUMUL_LOHI(N);
731  case ISD::SDIVREM:            return visitSDIVREM(N);
732  case ISD::UDIVREM:            return visitUDIVREM(N);
733  case ISD::AND:                return visitAND(N);
734  case ISD::OR:                 return visitOR(N);
735  case ISD::XOR:                return visitXOR(N);
736  case ISD::SHL:                return visitSHL(N);
737  case ISD::SRA:                return visitSRA(N);
738  case ISD::SRL:                return visitSRL(N);
739  case ISD::CTLZ:               return visitCTLZ(N);
740  case ISD::CTTZ:               return visitCTTZ(N);
741  case ISD::CTPOP:              return visitCTPOP(N);
742  case ISD::SELECT:             return visitSELECT(N);
743  case ISD::SELECT_CC:          return visitSELECT_CC(N);
744  case ISD::SETCC:              return visitSETCC(N);
745  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
746  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
747  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
748  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
749  case ISD::TRUNCATE:           return visitTRUNCATE(N);
750  case ISD::BIT_CONVERT:        return visitBIT_CONVERT(N);
751  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
752  case ISD::FADD:               return visitFADD(N);
753  case ISD::FSUB:               return visitFSUB(N);
754  case ISD::FMUL:               return visitFMUL(N);
755  case ISD::FDIV:               return visitFDIV(N);
756  case ISD::FREM:               return visitFREM(N);
757  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
758  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
759  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
760  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
761  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
762  case ISD::FP_ROUND:           return visitFP_ROUND(N);
763  case ISD::FP_ROUND_INREG:     return visitFP_ROUND_INREG(N);
764  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
765  case ISD::FNEG:               return visitFNEG(N);
766  case ISD::FABS:               return visitFABS(N);
767  case ISD::BRCOND:             return visitBRCOND(N);
768  case ISD::BR_CC:              return visitBR_CC(N);
769  case ISD::LOAD:               return visitLOAD(N);
770  case ISD::STORE:              return visitSTORE(N);
771  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
772  case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
773  case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
774  case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
775  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
776  }
777  return SDValue();
778}
779
780SDValue DAGCombiner::combine(SDNode *N) {
781  SDValue RV = visit(N);
782
783  // If nothing happened, try a target-specific DAG combine.
784  if (RV.getNode() == 0) {
785    assert(N->getOpcode() != ISD::DELETED_NODE &&
786           "Node was deleted but visit returned NULL!");
787
788    if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
789        TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {
790
791      // Expose the DAG combiner to the target combiner impls.
792      TargetLowering::DAGCombinerInfo
793        DagCombineInfo(DAG, Level == Unrestricted, false, this);
794
795      RV = TLI.PerformDAGCombine(N, DagCombineInfo);
796    }
797  }
798
799  // If N is a commutative binary node, try commuting it to enable more
800  // sdisel CSE.
801  if (RV.getNode() == 0 &&
802      SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
803      N->getNumValues() == 1) {
804    SDValue N0 = N->getOperand(0);
805    SDValue N1 = N->getOperand(1);
806
807    // Constant operands are canonicalized to RHS.
808    if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
809      SDValue Ops[] = { N1, N0 };
810      SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(),
811                                            Ops, 2);
812      if (CSENode)
813        return SDValue(CSENode, 0);
814    }
815  }
816
817  return RV;
818}
819
820/// getInputChainForNode - Given a node, return its input chain if it has one,
821/// otherwise return a null SDValue.
822static SDValue getInputChainForNode(SDNode *N) {
823  if (unsigned NumOps = N->getNumOperands()) {
824    if (N->getOperand(0).getValueType() == MVT::Other)
825      return N->getOperand(0);
826    else if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
827      return N->getOperand(NumOps-1);
828    for (unsigned i = 1; i < NumOps-1; ++i)
829      if (N->getOperand(i).getValueType() == MVT::Other)
830        return N->getOperand(i);
831  }
832  return SDValue();
833}
834
835SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
836  // If N has two operands, where one has an input chain equal to the other,
837  // the 'other' chain is redundant.
838  if (N->getNumOperands() == 2) {
839    if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
840      return N->getOperand(0);
841    if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
842      return N->getOperand(1);
843  }
844
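  // Flatten nested token factors: walk this node and any nested TokenFactor
  // that is single-use (or any nested one when -combiner-alias-analysis is on),
  // collecting each distinct non-TokenFactor chain operand exactly once.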
845  SmallVector<SDNode *, 8> TFs;     // List of token factors to visit.
846  SmallVector<SDValue, 8> Ops;    // Ops for replacing token factor.
847  SmallPtrSet<SDNode*, 16> SeenOps;
848  bool Changed = false;             // If we should replace this token factor.
849
850  // Start out with this token factor.
851  TFs.push_back(N);
852
853  // Iterate through token factors.  The TFs list grows when new token factors
854  // are encountered.
855  for (unsigned i = 0; i < TFs.size(); ++i) {
856    SDNode *TF = TFs[i];
857
858    // Check each of the operands.
859    for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) {
860      SDValue Op = TF->getOperand(i);
861
862      switch (Op.getOpcode()) {
863      case ISD::EntryToken:
864        // Entry tokens don't need to be added to the list. They are
865        // redundant.
866        Changed = true;
867        break;
868
869      case ISD::TokenFactor:
870        if ((CombinerAA || Op.hasOneUse()) &&
871            std::find(TFs.begin(), TFs.end(), Op.getNode()) == TFs.end()) {
872          // Queue up for processing.
873          TFs.push_back(Op.getNode());
874          // Clean up in case the token factor is removed.
875          AddToWorkList(Op.getNode());
876          Changed = true;
877          break;
878        }
879        // Fall thru
880
881      default:
882        // Only add if it isn't already in the list.
883        if (SeenOps.insert(Op.getNode()))
884          Ops.push_back(Op);
885        else
886          Changed = true;
887        break;
888      }
889    }
890  }
891
892  SDValue Result;
893
894  // If we've changed things around then replace the token factor.
895  if (Changed) {
896    if (Ops.empty()) {
897      // The entry token is the only possible outcome.
898      Result = DAG.getEntryNode();
899    } else {
900      // New and improved token factor.
901      Result = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
902                           MVT::Other, &Ops[0], Ops.size());
903    }
904
905    // Don't add users to work list.
906    return CombineTo(N, Result, false);
907  }
908
909  return Result;
910}
911
912/// MERGE_VALUES can always be eliminated.
913SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
914  WorkListRemover DeadNodes(*this);
915  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
916    DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i),
917                                  &DeadNodes);
918  removeFromWorkList(N);
919  DAG.DeleteNode(N);
920  return SDValue(N, 0);   // Return N so it doesn't get rechecked!
921}
922
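/// combineShlAddConstant - Given N0 = (shl (add x, c1), c2) with one use of the
/// inner add, rewrite (add N0, N1) as (add (add (shl x, c2), (shl c1, c2)), N1)
/// so the shifted constant can be folded further.  Returns a null SDValue if
/// the pattern does not match.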
923static
924SDValue combineShlAddConstant(DebugLoc DL, SDValue N0, SDValue N1,
925                              SelectionDAG &DAG) {
926  MVT VT = N0.getValueType();
927  SDValue N00 = N0.getOperand(0);
928  SDValue N01 = N0.getOperand(1);
929  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01);
930
931  if (N01C && N00.getOpcode() == ISD::ADD && N00.getNode()->hasOneUse() &&
932      isa<ConstantSDNode>(N00.getOperand(1))) {
933    // fold (add (shl (add x, c1), c2), y) -> (add (add (shl x, c2), c1<<c2), y)
934    N0 = DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT,
935                     DAG.getNode(ISD::SHL, N00.getDebugLoc(), VT,
936                                 N00.getOperand(0), N01),
937                     DAG.getNode(ISD::SHL, N01.getDebugLoc(), VT,
938                                 N00.getOperand(1), N01));
939    return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
940  }
941
942  return SDValue();
943}
944
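/// combineSelectAndUse - Fold an add/sub of a select whose selected value is
/// zero into a select of the combined result, e.g.
///   (add x, (select cc, 0, c)) -> (select cc, x, (add x, c)).
/// If the zero is on the other arm, the condition is inverted first (when its
/// inverse is legal).  Returns a null SDValue if the fold does not apply.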
945static
946SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
947                            SelectionDAG &DAG, const TargetLowering &TLI,
948                            bool LegalOperations) {
949  MVT VT = N->getValueType(0);
950  unsigned Opc = N->getOpcode();
951  bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
952  SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
953  SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
954  ISD::CondCode CC = ISD::SETCC_INVALID;
955
956  if (isSlctCC) {
957    CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
958  } else {
959    SDValue CCOp = Slct.getOperand(0);
960    if (CCOp.getOpcode() == ISD::SETCC)
961      CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
962  }
963
964  bool DoXform = false;
965  bool InvCC = false;
966  assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
967          "Bad input!");
968
969  if (LHS.getOpcode() == ISD::Constant &&
970      cast<ConstantSDNode>(LHS)->isNullValue()) {
971    DoXform = true;
972  } else if (CC != ISD::SETCC_INVALID &&
973             RHS.getOpcode() == ISD::Constant &&
974             cast<ConstantSDNode>(RHS)->isNullValue()) {
975    std::swap(LHS, RHS);
976    SDValue Op0 = Slct.getOperand(0);
977    MVT OpVT = isSlctCC ? Op0.getValueType() :
978                          Op0.getOperand(0).getValueType();
979    bool isInt = OpVT.isInteger();
980    CC = ISD::getSetCCInverse(CC, isInt);
981
982    if (LegalOperations && !TLI.isCondCodeLegal(CC, OpVT))
983      return SDValue();         // Inverse operator isn't legal.
984
985    DoXform = true;
986    InvCC = true;
987  }
988
989  if (DoXform) {
990    SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
991    if (isSlctCC)
992      return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
993                             Slct.getOperand(0), Slct.getOperand(1), CC);
994    SDValue CCOp = Slct.getOperand(0);
995    if (InvCC)
996      CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
997                          CCOp.getOperand(0), CCOp.getOperand(1), CC);
998    return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
999                       CCOp, OtherOp, Result);
1000  }
1001  return SDValue();
1002}
1003
1004SDValue DAGCombiner::visitADD(SDNode *N) {
1005  SDValue N0 = N->getOperand(0);
1006  SDValue N1 = N->getOperand(1);
1007  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1008  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1009  MVT VT = N0.getValueType();
1010
1011  // fold vector ops
1012  if (VT.isVector()) {
1013    SDValue FoldedVOp = SimplifyVBinOp(N);
1014    if (FoldedVOp.getNode()) return FoldedVOp;
1015  }
1016
1017  // fold (add x, undef) -> undef
1018  if (N0.getOpcode() == ISD::UNDEF)
1019    return N0;
1020  if (N1.getOpcode() == ISD::UNDEF)
1021    return N1;
1022  // fold (add c1, c2) -> c1+c2
1023  if (N0C && N1C)
1024    return DAG.FoldConstantArithmetic(ISD::ADD, VT, N0C, N1C);
1025  // canonicalize constant to RHS
1026  if (N0C && !N1C)
1027    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0);
1028  // fold (add x, 0) -> x
1029  if (N1C && N1C->isNullValue())
1030    return N0;
1031  // fold (add Sym, c) -> Sym+c
1032  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
1033    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C &&
1034        GA->getOpcode() == ISD::GlobalAddress)
1035      return DAG.getGlobalAddress(GA->getGlobal(), VT,
1036                                  GA->getOffset() +
1037                                    (uint64_t)N1C->getSExtValue());
1038  // fold ((c1-A)+c2) -> (c1+c2)-A
1039  if (N1C && N0.getOpcode() == ISD::SUB)
1040    if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0)))
1041      return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1042                         DAG.getConstant(N1C->getAPIntValue()+
1043                                         N0C->getAPIntValue(), VT),
1044                         N0.getOperand(1));
1045  // reassociate add
1046  SDValue RADD = ReassociateOps(ISD::ADD, N->getDebugLoc(), N0, N1);
1047  if (RADD.getNode() != 0)
1048    return RADD;
1049  // fold ((0-A) + B) -> B-A
1050  if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
1051      cast<ConstantSDNode>(N0.getOperand(0))->isNullValue())
1052    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1, N0.getOperand(1));
1053  // fold (A + (0-B)) -> A-B
1054  if (N1.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N1.getOperand(0)) &&
1055      cast<ConstantSDNode>(N1.getOperand(0))->isNullValue())
1056    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, N1.getOperand(1));
1057  // fold (A+(B-A)) -> B
1058  if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
1059    return N1.getOperand(0);
1060  // fold ((B-A)+A) -> B
1061  if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
1062    return N0.getOperand(0);
1063  // fold (A+(B-(A+C))) to (B-C)
1064  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1065      N0 == N1.getOperand(1).getOperand(0))
1066    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0),
1067                       N1.getOperand(1).getOperand(1));
1068  // fold (A+(B-(C+A))) to (B-C)
1069  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1070      N0 == N1.getOperand(1).getOperand(1))
1071    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0),
1072                       N1.getOperand(1).getOperand(0));
1073  // fold (A+((B-A)+or-C)) to (B+or-C)
1074  if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
1075      N1.getOperand(0).getOpcode() == ISD::SUB &&
1076      N0 == N1.getOperand(0).getOperand(1))
1077    return DAG.getNode(N1.getOpcode(), N->getDebugLoc(), VT,
1078                       N1.getOperand(0).getOperand(0), N1.getOperand(1));
1079
1080  // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
1081  if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
1082    SDValue N00 = N0.getOperand(0);
1083    SDValue N01 = N0.getOperand(1);
1084    SDValue N10 = N1.getOperand(0);
1085    SDValue N11 = N1.getOperand(1);
1086
1087    if (isa<ConstantSDNode>(N00) || isa<ConstantSDNode>(N10))
1088      return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1089                         DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT, N00, N10),
1090                         DAG.getNode(ISD::ADD, N1.getDebugLoc(), VT, N01, N11));
1091  }
1092
1093  if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0)))
1094    return SDValue(N, 0);
1095
1096  // fold (a+b) -> (a|b) iff a and b share no bits.
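  // For example, if a is known to be (x & 0xFF00) and b is (y & 0x00FF), no bit
  // position can produce a carry, so the add computes the same value as an or.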
1097  if (VT.isInteger() && !VT.isVector()) {
1098    APInt LHSZero, LHSOne;
1099    APInt RHSZero, RHSOne;
1100    APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits());
1101    DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
1102
1103    if (LHSZero.getBoolValue()) {
1104      DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
1105
1106      // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
1107      // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
1108      if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
1109          (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
1110        return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1);
1111    }
1112  }
1113
1114  // fold (add (shl (add x, c1), c2), y) -> (add (add (shl x, c2), c1<<c2), y)
1115  if (N0.getOpcode() == ISD::SHL && N0.getNode()->hasOneUse()) {
1116    SDValue Result = combineShlAddConstant(N->getDebugLoc(), N0, N1, DAG);
1117    if (Result.getNode()) return Result;
1118  }
1119  if (N1.getOpcode() == ISD::SHL && N1.getNode()->hasOneUse()) {
1120    SDValue Result = combineShlAddConstant(N->getDebugLoc(), N1, N0, DAG);
1121    if (Result.getNode()) return Result;
1122  }
1123
1124  // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
1125  if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
1126    SDValue Result = combineSelectAndUse(N, N0, N1, DAG, TLI, LegalOperations);
1127    if (Result.getNode()) return Result;
1128  }
1129  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
1130    SDValue Result = combineSelectAndUse(N, N1, N0, DAG, TLI, LegalOperations);
1131    if (Result.getNode()) return Result;
1132  }
1133
1134  return SDValue();
1135}
1136
1137SDValue DAGCombiner::visitADDC(SDNode *N) {
1138  SDValue N0 = N->getOperand(0);
1139  SDValue N1 = N->getOperand(1);
1140  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1141  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1142  MVT VT = N0.getValueType();
1143
1144  // If the flag result is dead, turn this into an ADD.
1145  if (N->hasNUsesOfValue(0, 1))
1146    return CombineTo(N, DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0),
1147                     DAG.getNode(ISD::CARRY_FALSE,
1148                                 N->getDebugLoc(), MVT::Flag));
1149
1150  // canonicalize constant to RHS.
1151  if (N0C && !N1C)
1152    return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N1, N0);
1153
1154  // fold (addc x, 0) -> x + no carry out
1155  if (N1C && N1C->isNullValue())
1156    return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
1157                                        N->getDebugLoc(), MVT::Flag));
1158
1159  // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
1160  APInt LHSZero, LHSOne;
1161  APInt RHSZero, RHSOne;
1162  APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits());
1163  DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
1164
1165  if (LHSZero.getBoolValue()) {
1166    DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
1167
1168    // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
1169    // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
1170    if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
1171        (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
1172      return CombineTo(N, DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1),
1173                       DAG.getNode(ISD::CARRY_FALSE,
1174                                   N->getDebugLoc(), MVT::Flag));
1175  }
1176
1177  return SDValue();
1178}
1179
1180SDValue DAGCombiner::visitADDE(SDNode *N) {
1181  SDValue N0 = N->getOperand(0);
1182  SDValue N1 = N->getOperand(1);
1183  SDValue CarryIn = N->getOperand(2);
1184  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1185  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1186
1187  // canonicalize constant to RHS
1188  if (N0C && !N1C)
1189    return DAG.getNode(ISD::ADDE, N->getDebugLoc(), N->getVTList(),
1190                       N1, N0, CarryIn);
1191
1192  // fold (adde x, y, false) -> (addc x, y)
1193  if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
1194    return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N1, N0);
1195
1196  return SDValue();
1197}
1198
1199SDValue DAGCombiner::visitSUB(SDNode *N) {
1200  SDValue N0 = N->getOperand(0);
1201  SDValue N1 = N->getOperand(1);
1202  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1203  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1204  MVT VT = N0.getValueType();
1205
1206  // fold vector ops
1207  if (VT.isVector()) {
1208    SDValue FoldedVOp = SimplifyVBinOp(N);
1209    if (FoldedVOp.getNode()) return FoldedVOp;
1210  }
1211
1212  // fold (sub x, x) -> 0
1213  if (N0 == N1)
1214    return DAG.getConstant(0, N->getValueType(0));
1215  // fold (sub c1, c2) -> c1-c2
1216  if (N0C && N1C)
1217    return DAG.FoldConstantArithmetic(ISD::SUB, VT, N0C, N1C);
1218  // fold (sub x, c) -> (add x, -c)
1219  if (N1C)
1220    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0,
1221                       DAG.getConstant(-N1C->getAPIntValue(), VT));
1222  // fold (A+B)-A -> B
1223  if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
1224    return N0.getOperand(1);
1225  // fold (A+B)-B -> A
1226  if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
1227    return N0.getOperand(0);
1228  // fold ((A+(B+or-C))-B) -> A+or-C
1229  if (N0.getOpcode() == ISD::ADD &&
1230      (N0.getOperand(1).getOpcode() == ISD::SUB ||
1231       N0.getOperand(1).getOpcode() == ISD::ADD) &&
1232      N0.getOperand(1).getOperand(0) == N1)
1233    return DAG.getNode(N0.getOperand(1).getOpcode(), N->getDebugLoc(), VT,
1234                       N0.getOperand(0), N0.getOperand(1).getOperand(1));
1235  // fold ((A+(C+B))-B) -> A+C
1236  if (N0.getOpcode() == ISD::ADD &&
1237      N0.getOperand(1).getOpcode() == ISD::ADD &&
1238      N0.getOperand(1).getOperand(1) == N1)
1239    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT,
1240                       N0.getOperand(0), N0.getOperand(1).getOperand(0));
1241  // fold ((A-(B-C))-C) -> A-B
1242  if (N0.getOpcode() == ISD::SUB &&
1243      N0.getOperand(1).getOpcode() == ISD::SUB &&
1244      N0.getOperand(1).getOperand(1) == N1)
1245    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1246                       N0.getOperand(0), N0.getOperand(1).getOperand(0));
1247  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
1248  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
1249    SDValue Result = combineSelectAndUse(N, N1, N0, DAG, TLI, LegalOperations);
1250    if (Result.getNode()) return Result;
1251  }
1252
1253  // If either operand of a sub is undef, the result is undef
1254  if (N0.getOpcode() == ISD::UNDEF)
1255    return N0;
1256  if (N1.getOpcode() == ISD::UNDEF)
1257    return N1;
1258
1259  // If the relocation model supports it, consider symbol offsets.
1260  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
1261    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
1262      // fold (sub Sym, c) -> Sym-c
1263      if (N1C && GA->getOpcode() == ISD::GlobalAddress)
1264        return DAG.getGlobalAddress(GA->getGlobal(), VT,
1265                                    GA->getOffset() -
1266                                      (uint64_t)N1C->getSExtValue());
1267      // fold (sub Sym+c1, Sym+c2) -> c1-c2
1268      if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
1269        if (GA->getGlobal() == GB->getGlobal())
1270          return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
1271                                 VT);
1272    }
1273
1274  return SDValue();
1275}
1276
1277SDValue DAGCombiner::visitMUL(SDNode *N) {
1278  SDValue N0 = N->getOperand(0);
1279  SDValue N1 = N->getOperand(1);
1280  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1281  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1282  MVT VT = N0.getValueType();
1283
1284  // fold vector ops
1285  if (VT.isVector()) {
1286    SDValue FoldedVOp = SimplifyVBinOp(N);
1287    if (FoldedVOp.getNode()) return FoldedVOp;
1288  }
1289
1290  // fold (mul x, undef) -> 0
1291  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1292    return DAG.getConstant(0, VT);
1293  // fold (mul c1, c2) -> c1*c2
1294  if (N0C && N1C)
1295    return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0C, N1C);
1296  // canonicalize constant to RHS
1297  if (N0C && !N1C)
1298    return DAG.getNode(ISD::MUL, N->getDebugLoc(), VT, N1, N0);
1299  // fold (mul x, 0) -> 0
1300  if (N1C && N1C->isNullValue())
1301    return N1;
1302  // fold (mul x, -1) -> 0-x
1303  if (N1C && N1C->isAllOnesValue())
1304    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1305                       DAG.getConstant(0, VT), N0);
1306  // fold (mul x, (1 << c)) -> x << c
1307  if (N1C && N1C->getAPIntValue().isPowerOf2())
1308    return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
1309                       DAG.getConstant(N1C->getAPIntValue().logBase2(),
1310                                       getShiftAmountTy()));
1311  // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
1312  if (N1C && isPowerOf2_64(-N1C->getSExtValue()))
1313    // FIXME: If the input is something that is easily negated (e.g. a
1314    // single-use add), we should put the negate there.
1315    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1316                       DAG.getConstant(0, VT),
1317                       DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
1318                            DAG.getConstant(Log2_64(-N1C->getSExtValue()),
1319                                            getShiftAmountTy())));
1320  // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
1321  if (N1C && N0.getOpcode() == ISD::SHL &&
1322      isa<ConstantSDNode>(N0.getOperand(1))) {
1323    SDValue C3 = DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
1324                             N1, N0.getOperand(1));
1325    AddToWorkList(C3.getNode());
1326    return DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1327                       N0.getOperand(0), C3);
1328  }
1329
1330  // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
1331  // use.
1332  {
1333    SDValue Sh(0,0), Y(0,0);
1334    // Check for both (mul (shl X, C), Y)  and  (mul Y, (shl X, C)).
1335    if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
1336        N0.getNode()->hasOneUse()) {
1337      Sh = N0; Y = N1;
1338    } else if (N1.getOpcode() == ISD::SHL &&
1339               isa<ConstantSDNode>(N1.getOperand(1)) &&
1340               N1.getNode()->hasOneUse()) {
1341      Sh = N1; Y = N0;
1342    }
1343
1344    if (Sh.getNode()) {
1345      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1346                                Sh.getOperand(0), Y);
1347      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
1348                         Mul, Sh.getOperand(1));
1349    }
1350  }
1351
1352  // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
1353  if (N1C && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
1354      isa<ConstantSDNode>(N0.getOperand(1)))
1355    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT,
1356                       DAG.getNode(ISD::MUL, N0.getDebugLoc(), VT,
1357                                   N0.getOperand(0), N1),
1358                       DAG.getNode(ISD::MUL, N1.getDebugLoc(), VT,
1359                                   N0.getOperand(1), N1));
1360
1361  // reassociate mul
1362  SDValue RMUL = ReassociateOps(ISD::MUL, N->getDebugLoc(), N0, N1);
1363  if (RMUL.getNode() != 0)
1364    return RMUL;
1365
1366  return SDValue();
1367}
1368
1369SDValue DAGCombiner::visitSDIV(SDNode *N) {
1370  SDValue N0 = N->getOperand(0);
1371  SDValue N1 = N->getOperand(1);
1372  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1373  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1374  MVT VT = N->getValueType(0);
1375
1376  // fold vector ops
1377  if (VT.isVector()) {
1378    SDValue FoldedVOp = SimplifyVBinOp(N);
1379    if (FoldedVOp.getNode()) return FoldedVOp;
1380  }
1381
1382  // fold (sdiv c1, c2) -> c1/c2
1383  if (N0C && N1C && !N1C->isNullValue())
1384    return DAG.FoldConstantArithmetic(ISD::SDIV, VT, N0C, N1C);
1385  // fold (sdiv X, 1) -> X
1386  if (N1C && N1C->getSExtValue() == 1LL)
1387    return N0;
1388  // fold (sdiv X, -1) -> 0-X
1389  if (N1C && N1C->isAllOnesValue())
1390    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1391                       DAG.getConstant(0, VT), N0);
1392  // If we know the sign bits of both operands are zero, strength reduce to a
1393  // udiv instead.  Handles (X&15) /s 4 -> X&15 >> 2
1394  if (!VT.isVector()) {
1395    if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
1396      return DAG.getNode(ISD::UDIV, N->getDebugLoc(), N1.getValueType(),
1397                         N0, N1);
1398  }
1399  // fold (sdiv X, pow2) -> simple ops after legalize
1400  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap() &&
1401      (isPowerOf2_64(N1C->getSExtValue()) ||
1402       isPowerOf2_64(-N1C->getSExtValue()))) {
1403    // If dividing by powers of two is cheap, then don't perform the following
1404    // fold.
1405    if (TLI.isPow2DivCheap())
1406      return SDValue();
1407
1408    int64_t pow2 = N1C->getSExtValue();
1409    int64_t abs2 = pow2 > 0 ? pow2 : -pow2;
1410    unsigned lg2 = Log2_64(abs2);
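    // For example (illustrative, i32 X /s 4, so abs2 == 4 and lg2 == 2), the
    // sequence built below is:
    //   SGN = sra X, 31    ; 0 if X >= 0, all ones if X < 0
    //   SRL = srl SGN, 30  ; 0 if X >= 0, abs2-1 == 3 if X < 0
    //   ADD = add X, SRL   ; bias negative values toward zero
    //   SRA = sra ADD, 2   ; e.g. X == -7 gives -1, matching -7 /s 4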
1411
1412    // Splat the sign bit into the register
1413    SDValue SGN = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
1414                              DAG.getConstant(VT.getSizeInBits()-1,
1415                                              getShiftAmountTy()));
1416    AddToWorkList(SGN.getNode());
1417
1418    // Add (N0 < 0) ? abs2 - 1 : 0;
1419    SDValue SRL = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, SGN,
1420                              DAG.getConstant(VT.getSizeInBits() - lg2,
1421                                              getShiftAmountTy()));
1422    SDValue ADD = DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, SRL);
1423    AddToWorkList(SRL.getNode());
1424    AddToWorkList(ADD.getNode());    // Divide by pow2
1425    SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, ADD,
1426                              DAG.getConstant(lg2, getShiftAmountTy()));
1427
1428    // If we're dividing by a positive value, we're done.  Otherwise, we must
1429    // negate the result.
1430    if (pow2 > 0)
1431      return SRA;
1432
1433    AddToWorkList(SRA.getNode());
1434    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1435                       DAG.getConstant(0, VT), SRA);
1436  }
1437
1438  // if integer divide is expensive and we satisfy the requirements, emit an
1439  // alternate sequence.
1440  if (N1C && (N1C->getSExtValue() < -1 || N1C->getSExtValue() > 1) &&
1441      !TLI.isIntDivCheap()) {
1442    SDValue Op = BuildSDIV(N);
1443    if (Op.getNode()) return Op;
1444  }
1445
1446  // undef / X -> 0
1447  if (N0.getOpcode() == ISD::UNDEF)
1448    return DAG.getConstant(0, VT);
1449  // X / undef -> undef
1450  if (N1.getOpcode() == ISD::UNDEF)
1451    return N1;
1452
1453  return SDValue();
1454}
1455
1456SDValue DAGCombiner::visitUDIV(SDNode *N) {
1457  SDValue N0 = N->getOperand(0);
1458  SDValue N1 = N->getOperand(1);
1459  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1460  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1461  MVT VT = N->getValueType(0);
1462
1463  // fold vector ops
1464  if (VT.isVector()) {
1465    SDValue FoldedVOp = SimplifyVBinOp(N);
1466    if (FoldedVOp.getNode()) return FoldedVOp;
1467  }
1468
1469  // fold (udiv c1, c2) -> c1/c2
1470  if (N0C && N1C && !N1C->isNullValue())
1471    return DAG.FoldConstantArithmetic(ISD::UDIV, VT, N0C, N1C);
1472  // fold (udiv x, (1 << c)) -> x >>u c
1473  if (N1C && N1C->getAPIntValue().isPowerOf2())
1474    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
1475                       DAG.getConstant(N1C->getAPIntValue().logBase2(),
1476                                       getShiftAmountTy()));
1477  // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
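  // e.g. (udiv x, (shl 8, y)) -> (srl x, (add y, 3))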
1478  if (N1.getOpcode() == ISD::SHL) {
1479    if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
1480      if (SHC->getAPIntValue().isPowerOf2()) {
1481        MVT ADDVT = N1.getOperand(1).getValueType();
1482        SDValue Add = DAG.getNode(ISD::ADD, N->getDebugLoc(), ADDVT,
1483                                  N1.getOperand(1),
1484                                  DAG.getConstant(SHC->getAPIntValue()
1485                                                                  .logBase2(),
1486                                                  ADDVT));
1487        AddToWorkList(Add.getNode());
1488        return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, Add);
1489      }
1490    }
1491  }
1492  // fold (udiv x, c) -> alternate sequence when integer divide is expensive
1493  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) {
1494    SDValue Op = BuildUDIV(N);
1495    if (Op.getNode()) return Op;
1496  }
1497
1498  // undef / X -> 0
1499  if (N0.getOpcode() == ISD::UNDEF)
1500    return DAG.getConstant(0, VT);
1501  // X / undef -> undef
1502  if (N1.getOpcode() == ISD::UNDEF)
1503    return N1;
1504
1505  return SDValue();
1506}
1507
1508SDValue DAGCombiner::visitSREM(SDNode *N) {
1509  SDValue N0 = N->getOperand(0);
1510  SDValue N1 = N->getOperand(1);
1511  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1512  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1513  MVT VT = N->getValueType(0);
1514
1515  // fold (srem c1, c2) -> c1%c2
1516  if (N0C && N1C && !N1C->isNullValue())
1517    return DAG.FoldConstantArithmetic(ISD::SREM, VT, N0C, N1C);
1518  // If we know the sign bits of both operands are zero, strength reduce to a
1519  // urem instead.  Handles (X & 0x0FFFFFFF) %s 16 -> X&15
1520  if (!VT.isVector()) {
1521    if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
1522      return DAG.getNode(ISD::UREM, N->getDebugLoc(), VT, N0, N1);
1523  }
1524
1525  // If X/C can be simplified by the division-by-constant logic, lower
1526  // X%C to the equivalent of X-X/C*C.
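  // e.g. 23 %s 7 == 23 - (23 /s 7)*7 == 2; this path only fires when the SDIV
  // built below simplifies to something cheaper than a real division.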
1527  if (N1C && !N1C->isNullValue()) {
1528    SDValue Div = DAG.getNode(ISD::SDIV, N->getDebugLoc(), VT, N0, N1);
1529    AddToWorkList(Div.getNode());
1530    SDValue OptimizedDiv = combine(Div.getNode());
1531    if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
1532      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1533                                OptimizedDiv, N1);
1534      SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul);
1535      AddToWorkList(Mul.getNode());
1536      return Sub;
1537    }
1538  }
1539
1540  // undef % X -> 0
1541  if (N0.getOpcode() == ISD::UNDEF)
1542    return DAG.getConstant(0, VT);
1543  // X % undef -> undef
1544  if (N1.getOpcode() == ISD::UNDEF)
1545    return N1;
1546
1547  return SDValue();
1548}
1549
1550SDValue DAGCombiner::visitUREM(SDNode *N) {
1551  SDValue N0 = N->getOperand(0);
1552  SDValue N1 = N->getOperand(1);
1553  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1554  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1555  MVT VT = N->getValueType(0);
1556
1557  // fold (urem c1, c2) -> c1%c2
1558  if (N0C && N1C && !N1C->isNullValue())
1559    return DAG.FoldConstantArithmetic(ISD::UREM, VT, N0C, N1C);
1560  // fold (urem x, pow2) -> (and x, pow2-1)
1561  if (N1C && !N1C->isNullValue() && N1C->getAPIntValue().isPowerOf2())
1562    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0,
1563                       DAG.getConstant(N1C->getAPIntValue()-1,VT));
1564  // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
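  // e.g. x %u (8 << y) -> x & ((8 << y) - 1); the add of all-ones below is
  // that subtraction of 1.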
1565  if (N1.getOpcode() == ISD::SHL) {
1566    if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
1567      if (SHC->getAPIntValue().isPowerOf2()) {
1568        SDValue Add =
1569          DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1,
1570                 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()),
1571                                 VT));
1572        AddToWorkList(Add.getNode());
1573        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, Add);
1574      }
1575    }
1576  }
1577
1578  // If X/C can be simplified by the division-by-constant logic, lower
1579  // X%C to the equivalent of X-X/C*C.
1580  if (N1C && !N1C->isNullValue()) {
1581    SDValue Div = DAG.getNode(ISD::UDIV, N->getDebugLoc(), VT, N0, N1);
1582    AddToWorkList(Div.getNode());
1583    SDValue OptimizedDiv = combine(Div.getNode());
1584    if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
1585      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1586                                OptimizedDiv, N1);
1587      SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul);
1588      AddToWorkList(Mul.getNode());
1589      return Sub;
1590    }
1591  }
1592
1593  // undef % X -> 0
1594  if (N0.getOpcode() == ISD::UNDEF)
1595    return DAG.getConstant(0, VT);
1596  // X % undef -> undef
1597  if (N1.getOpcode() == ISD::UNDEF)
1598    return N1;
1599
1600  return SDValue();
1601}
1602
1603SDValue DAGCombiner::visitMULHS(SDNode *N) {
1604  SDValue N0 = N->getOperand(0);
1605  SDValue N1 = N->getOperand(1);
1606  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1607  MVT VT = N->getValueType(0);
1608
1609  // fold (mulhs x, 0) -> 0
1610  if (N1C && N1C->isNullValue())
1611    return N1;
1612  // fold (mulhs x, 1) -> (sra x, size(x)-1)
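  // (the high half of the sign-extended product x*1 is just copies of the sign
  // bit of x, which is exactly what the arithmetic shift produces)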
1613  if (N1C && N1C->getAPIntValue() == 1)
1614    return DAG.getNode(ISD::SRA, N->getDebugLoc(), N0.getValueType(), N0,
1615                       DAG.getConstant(N0.getValueType().getSizeInBits() - 1,
1616                                       getShiftAmountTy()));
1617  // fold (mulhs x, undef) -> 0
1618  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1619    return DAG.getConstant(0, VT);
1620
1621  return SDValue();
1622}
1623
1624SDValue DAGCombiner::visitMULHU(SDNode *N) {
1625  SDValue N0 = N->getOperand(0);
1626  SDValue N1 = N->getOperand(1);
1627  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1628  MVT VT = N->getValueType(0);
1629
1630  // fold (mulhu x, 0) -> 0
1631  if (N1C && N1C->isNullValue())
1632    return N1;
1633  // fold (mulhu x, 1) -> 0
1634  if (N1C && N1C->getAPIntValue() == 1)
1635    return DAG.getConstant(0, N0.getValueType());
1636  // fold (mulhu x, undef) -> 0
1637  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1638    return DAG.getConstant(0, VT);
1639
1640  return SDValue();
1641}
1642
1643/// SimplifyNodeWithTwoResults - Perform optimizations common to nodes that
1644/// compute two values. LoOp and HiOp give the opcodes for the two computations
1645/// that are being performed. Returns the simplified value if one was made.
1646///
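/// For example, if only the low result of an (smul_lohi x, y) is used and MUL
/// is legal for the type, the node is replaced by a single (mul x, y).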
1647SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
1648                                                unsigned HiOp) {
1649  // If the high half is not needed, just compute the low half.
1650  bool HiExists = N->hasAnyUseOfValue(1);
1651  if (!HiExists &&
1652      (!LegalOperations ||
1653       TLI.isOperationLegal(LoOp, N->getValueType(0)))) {
1654    SDValue Res = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0),
1655                              N->op_begin(), N->getNumOperands());
1656    return CombineTo(N, Res, Res);
1657  }
1658
1659  // If the low half is not needed, just compute the high half.
1660  bool LoExists = N->hasAnyUseOfValue(0);
1661  if (!LoExists &&
1662      (!LegalOperations ||
1663       TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
1664    SDValue Res = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1),
1665                              N->op_begin(), N->getNumOperands());
1666    return CombineTo(N, Res, Res);
1667  }
1668
1669  // If both halves are used, return as it is.
1670  if (LoExists && HiExists)
1671    return SDValue();
1672
1673  // If the two computed results can be simplified separately, separate them.
1674  if (LoExists) {
1675    SDValue Lo = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0),
1676                             N->op_begin(), N->getNumOperands());
1677    AddToWorkList(Lo.getNode());
1678    SDValue LoOpt = combine(Lo.getNode());
1679    if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
1680        (!LegalOperations ||
1681         TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
1682      return CombineTo(N, LoOpt, LoOpt);
1683  }
1684
1685  if (HiExists) {
1686    SDValue Hi = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1),
1687                             N->op_begin(), N->getNumOperands());
1688    AddToWorkList(Hi.getNode());
1689    SDValue HiOpt = combine(Hi.getNode());
1690    if (HiOpt.getNode() && HiOpt != Hi &&
1691        (!LegalOperations ||
1692         TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
1693      return CombineTo(N, HiOpt, HiOpt);
1694  }
1695
1696  return SDValue();
1697}
1698
1699SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
1700  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS);
1701  if (Res.getNode()) return Res;
1702
1703  return SDValue();
1704}
1705
1706SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
1707  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU);
1708  if (Res.getNode()) return Res;
1709
1710  return SDValue();
1711}
1712
1713SDValue DAGCombiner::visitSDIVREM(SDNode *N) {
1714  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM);
1715  if (Res.getNode()) return Res;
1716
1717  return SDValue();
1718}
1719
1720SDValue DAGCombiner::visitUDIVREM(SDNode *N) {
1721  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM);
1722  if (Res.getNode()) return Res;
1723
1724  return SDValue();
1725}
1726
1727/// SimplifyBinOpWithSameOpcodeHands - If this is a binary operator with
1728/// two operands of the same opcode, try to simplify it.
1729SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
1730  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
1731  MVT VT = N0.getValueType();
1732  assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
1733
1734  // For each of OP in AND/OR/XOR:
1735  // fold (OP (zext x), (zext y)) -> (zext (OP x, y))
1736  // fold (OP (sext x), (sext y)) -> (sext (OP x, y))
1737  // fold (OP (aext x), (aext y)) -> (aext (OP x, y))
1738  // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y))
1739  if ((N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND||
1740       N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
1741      N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
1742    SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(),
1743                                 N0.getOperand(0).getValueType(),
1744                                 N0.getOperand(0), N1.getOperand(0));
1745    AddToWorkList(ORNode.getNode());
1746    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, ORNode);
1747  }
1748
1749  // For each of OP in SHL/SRL/SRA/AND...
1750  //   fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
1751  //   fold (or  (OP x, z), (OP y, z)) -> (OP (or  x, y), z)
1752  //   fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
1753  if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
1754       N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
1755      N0.getOperand(1) == N1.getOperand(1)) {
1756    SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(),
1757                                 N0.getOperand(0).getValueType(),
1758                                 N0.getOperand(0), N1.getOperand(0));
1759    AddToWorkList(ORNode.getNode());
1760    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
1761                       ORNode, N0.getOperand(1));
1762  }
1763
1764  return SDValue();
1765}
1766
1767SDValue DAGCombiner::visitAND(SDNode *N) {
1768  SDValue N0 = N->getOperand(0);
1769  SDValue N1 = N->getOperand(1);
1770  SDValue LL, LR, RL, RR, CC0, CC1;
1771  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1772  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1773  MVT VT = N1.getValueType();
1774  unsigned BitWidth = VT.getSizeInBits();
1775
1776  // fold vector ops
1777  if (VT.isVector()) {
1778    SDValue FoldedVOp = SimplifyVBinOp(N);
1779    if (FoldedVOp.getNode()) return FoldedVOp;
1780  }
1781
1782  // fold (and x, undef) -> 0
1783  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1784    return DAG.getConstant(0, VT);
1785  // fold (and c1, c2) -> c1&c2
1786  if (N0C && N1C)
1787    return DAG.FoldConstantArithmetic(ISD::AND, VT, N0C, N1C);
1788  // canonicalize constant to RHS
1789  if (N0C && !N1C)
1790    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N1, N0);
1791  // fold (and x, -1) -> x
1792  if (N1C && N1C->isAllOnesValue())
1793    return N0;
1794  // if (and x, c) is known to be zero, return 0
1795  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
1796                                   APInt::getAllOnesValue(BitWidth)))
1797    return DAG.getConstant(0, VT);
1798  // reassociate and
1799  SDValue RAND = ReassociateOps(ISD::AND, N->getDebugLoc(), N0, N1);
1800  if (RAND.getNode() != 0)
1801    return RAND;
1802  // fold (and (or x, 0xFFFF), 0xFF) -> 0xFF
1803  if (N1C && N0.getOpcode() == ISD::OR)
1804    if (ConstantSDNode *ORI = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
1805      if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue())
1806        return N1;
1807  // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
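  // This applies when every bit the mask would clear is already known zero in
  // the pre-extension value (checked below on the narrow type); the resulting
  // zero_extend then makes this AND redundant, and it folds away afterwards.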
1808  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
1809    SDValue N0Op0 = N0.getOperand(0);
1810    APInt Mask = ~N1C->getAPIntValue();
1811    Mask.trunc(N0Op0.getValueSizeInBits());
1812    if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
1813      SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(),
1814                                 N0.getValueType(), N0Op0);
1815
1816      // Replace uses of the AND with uses of the Zero extend node.
1817      CombineTo(N, Zext);
1818
1819      // We actually want to replace all uses of the any_extend with the
1820      // zero_extend, to avoid duplicating things.  This will later cause this
1821      // AND to be folded.
1822      CombineTo(N0.getNode(), Zext);
1823      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1824    }
1825  }
1826  // fold (and (setcc x), (setcc y)) -> (setcc (and x, y))
1827  if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
1828    ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
1829    ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
1830
1831    if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
1832        LL.getValueType().isInteger()) {
1833      // fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0)
1834      if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) {
1835        SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(),
1836                                     LR.getValueType(), LL, RL);
1837        AddToWorkList(ORNode.getNode());
1838        return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
1839      }
1840      // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1)
1841      if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) {
1842        SDValue ANDNode = DAG.getNode(ISD::AND, N0.getDebugLoc(),
1843                                      LR.getValueType(), LL, RL);
1844        AddToWorkList(ANDNode.getNode());
1845        return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1);
1846      }
1847      // fold (and (setgt X,  -1), (setgt Y,  -1)) -> (setgt (or X, Y), -1)
1848      if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) {
1849        SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(),
1850                                     LR.getValueType(), LL, RL);
1851        AddToWorkList(ORNode.getNode());
1852        return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
1853      }
1854    }
1855    // canonicalize equivalent to ll == rl
1856    if (LL == RR && LR == RL) {
1857      Op1 = ISD::getSetCCSwappedOperands(Op1);
1858      std::swap(RL, RR);
1859    }
1860    if (LL == RL && LR == RR) {
1861      bool isInteger = LL.getValueType().isInteger();
1862      ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger);
1863      if (Result != ISD::SETCC_INVALID &&
1864          (!LegalOperations || TLI.isCondCodeLegal(Result, LL.getValueType())))
1865        return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(),
1866                            LL, LR, Result);
1867    }
1868  }
1869
1870  // Simplify: (and (op x...), (op y...))  -> (op (and x, y))
1871  if (N0.getOpcode() == N1.getOpcode()) {
1872    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
1873    if (Tmp.getNode()) return Tmp;
1874  }
1875
1876  // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
1877  // fold (and (sra)) -> (and (srl)) when possible.
1878  if (!VT.isVector() &&
1879      SimplifyDemandedBits(SDValue(N, 0)))
1880    return SDValue(N, 0);
1881  // fold (zext_inreg (extload x)) -> (zextload x)
1882  if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) {
1883    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
1884    MVT EVT = LN0->getMemoryVT();
1885    // If we zero all the possible extended bits, then we can turn this into
1886    // a zextload if we are running before legalize or the operation is legal.
1887    unsigned BitWidth = N1.getValueSizeInBits();
1888    if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
1889                                     BitWidth - EVT.getSizeInBits())) &&
1890        ((!LegalOperations && !LN0->isVolatile()) ||
1891         TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
1892      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
1893                                       LN0->getChain(), LN0->getBasePtr(),
1894                                       LN0->getSrcValue(),
1895                                       LN0->getSrcValueOffset(), EVT,
1896                                       LN0->isVolatile(), LN0->getAlignment());
1897      AddToWorkList(N);
1898      CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
1899      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1900    }
1901  }
1902  // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
1903  if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
1904      N0.hasOneUse()) {
1905    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
1906    MVT EVT = LN0->getMemoryVT();
1907    // If we zero all the possible extended bits, then we can turn this into
1908    // a zextload if we are running before legalize or the operation is legal.
1909    unsigned BitWidth = N1.getValueSizeInBits();
1910    if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
1911                                     BitWidth - EVT.getSizeInBits())) &&
1912        ((!LegalOperations && !LN0->isVolatile()) ||
1913         TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
1914      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
1915                                       LN0->getChain(),
1916                                       LN0->getBasePtr(), LN0->getSrcValue(),
1917                                       LN0->getSrcValueOffset(), EVT,
1918                                       LN0->isVolatile(), LN0->getAlignment());
1919      AddToWorkList(N);
1920      CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
1921      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1922    }
1923  }
1924
1925  // fold (and (load x), 255) -> (zextload x, i8)
1926  // fold (and (extload x, i16), 255) -> (zextload x, i8)
1927  if (N1C && N0.getOpcode() == ISD::LOAD) {
1928    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
1929    if (LN0->getExtensionType() != ISD::SEXTLOAD &&
1930        LN0->isUnindexed() && N0.hasOneUse() &&
1931        // Do not change the width of a volatile load.
1932        !LN0->isVolatile()) {
1933      MVT EVT = MVT::Other;
1934      uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits();
1935      if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue()))
1936        EVT = MVT::getIntegerVT(ActiveBits);
1937
1938      MVT LoadedVT = LN0->getMemoryVT();
1939
1940      // Do not generate loads of non-round integer types since these can
1941      // be expensive (and would be wrong if the type is not byte sized).
1942      if (EVT != MVT::Other && LoadedVT.bitsGT(EVT) && EVT.isRound() &&
1943          (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
1944        MVT PtrType = N0.getOperand(1).getValueType();
1945
1946        // For big endian targets, we need to add an offset to the pointer to
1947        // load the correct bytes.  For little endian systems, we merely need to
1948        // read fewer bytes from the same pointer.
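        // e.g. narrowing an i32 load to an i8 zextload gives PtrOff == 3, so a
        // big-endian target advances the pointer by 3 bytes to reach the least
        // significant byte.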
1949        unsigned LVTStoreBytes = LoadedVT.getStoreSizeInBits()/8;
1950        unsigned EVTStoreBytes = EVT.getStoreSizeInBits()/8;
1951        unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;
1952        unsigned Alignment = LN0->getAlignment();
1953        SDValue NewPtr = LN0->getBasePtr();
1954
1955        if (TLI.isBigEndian()) {
1956          NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(), PtrType,
1957                               NewPtr, DAG.getConstant(PtrOff, PtrType));
1958          Alignment = MinAlign(Alignment, PtrOff);
1959        }
1960
1961        AddToWorkList(NewPtr.getNode());
1962        SDValue Load =
1963          DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), VT, LN0->getChain(),
1964                         NewPtr, LN0->getSrcValue(), LN0->getSrcValueOffset(),
1965                         EVT, LN0->isVolatile(), Alignment);
1966        AddToWorkList(N);
1967        CombineTo(N0.getNode(), Load, Load.getValue(1));
1968        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1969      }
1970    }
1971  }
1972
1973  return SDValue();
1974}
1975
1976SDValue DAGCombiner::visitOR(SDNode *N) {
1977  SDValue N0 = N->getOperand(0);
1978  SDValue N1 = N->getOperand(1);
1979  SDValue LL, LR, RL, RR, CC0, CC1;
1980  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1981  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1982  MVT VT = N1.getValueType();
1983
1984  // fold vector ops
1985  if (VT.isVector()) {
1986    SDValue FoldedVOp = SimplifyVBinOp(N);
1987    if (FoldedVOp.getNode()) return FoldedVOp;
1988  }
1989
1990  // fold (or x, undef) -> -1
1991  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1992    return DAG.getConstant(~0ULL, VT);
1993  // fold (or c1, c2) -> c1|c2
1994  if (N0C && N1C)
1995    return DAG.FoldConstantArithmetic(ISD::OR, VT, N0C, N1C);
1996  // canonicalize constant to RHS
1997  if (N0C && !N1C)
1998    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N1, N0);
1999  // fold (or x, 0) -> x
2000  if (N1C && N1C->isNullValue())
2001    return N0;
2002  // fold (or x, -1) -> -1
2003  if (N1C && N1C->isAllOnesValue())
2004    return N1;
2005  // fold (or x, c) -> c iff (x & ~c) == 0
2006  if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
2007    return N1;
2008  // reassociate or
2009  SDValue ROR = ReassociateOps(ISD::OR, N->getDebugLoc(), N0, N1);
2010  if (ROR.getNode() != 0)
2011    return ROR;
2012  // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
2013  if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
2014             isa<ConstantSDNode>(N0.getOperand(1))) {
2015    ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
2016    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
2017                       DAG.getNode(ISD::OR, N0.getDebugLoc(), VT,
2018                                   N0.getOperand(0), N1),
2019                       DAG.FoldConstantArithmetic(ISD::OR, VT, N1C, C1));
2020  }
2021  // fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
2022  if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
2023    ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
2024    ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
2025
2026    if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
2027        LL.getValueType().isInteger()) {
2028      // fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
2029      // fold (or (setlt X, 0), (setlt Y, 0)) -> (setlt (or X, Y), 0)
2030      if (cast<ConstantSDNode>(LR)->isNullValue() &&
2031          (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
2032        SDValue ORNode = DAG.getNode(ISD::OR, LR.getDebugLoc(),
2033                                     LR.getValueType(), LL, RL);
2034        AddToWorkList(ORNode.getNode());
2035        return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
2036      }
2037      // fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
2038      // fold (or (setgt X, -1), (setgt Y, -1)) -> (setgt (and X, Y), -1)
2039      if (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
2040          (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
2041        SDValue ANDNode = DAG.getNode(ISD::AND, LR.getDebugLoc(),
2042                                      LR.getValueType(), LL, RL);
2043        AddToWorkList(ANDNode.getNode());
2044        return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1);
2045      }
2046    }
2047    // canonicalize equivalent to ll == rl
2048    if (LL == RR && LR == RL) {
2049      Op1 = ISD::getSetCCSwappedOperands(Op1);
2050      std::swap(RL, RR);
2051    }
2052    if (LL == RL && LR == RR) {
2053      bool isInteger = LL.getValueType().isInteger();
2054      ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
2055      if (Result != ISD::SETCC_INVALID &&
2056          (!LegalOperations || TLI.isCondCodeLegal(Result, LL.getValueType())))
2057        return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(),
2058                            LL, LR, Result);
2059    }
2060  }
2061
2062  // Simplify: (or (op x...), (op y...))  -> (op (or x, y))
2063  if (N0.getOpcode() == N1.getOpcode()) {
2064    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
2065    if (Tmp.getNode()) return Tmp;
2066  }
2067
2068  // (or (and X, C1), (and Y, C2))  -> (and (or X, Y), C3) if possible.
2069  if (N0.getOpcode() == ISD::AND &&
2070      N1.getOpcode() == ISD::AND &&
2071      N0.getOperand(1).getOpcode() == ISD::Constant &&
2072      N1.getOperand(1).getOpcode() == ISD::Constant &&
2073      // Don't increase # computations.
2074      (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
2075    // We can only do this xform if we know that bits from X that are set in C2
2076    // but not in C1 are already zero.  Likewise for Y.
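    // e.g. with C1 == 0xFF00 and C2 == 0x00FF, if X's low byte and Y's bits
    // 8-15 are known zero, the result is (and (or X, Y), 0xFFFF).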
2077    const APInt &LHSMask =
2078      cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
2079    const APInt &RHSMask =
2080      cast<ConstantSDNode>(N1.getOperand(1))->getAPIntValue();
2081
2082    if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
2083        DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
2084      SDValue X = DAG.getNode(ISD::OR, N0.getDebugLoc(), VT,
2085                              N0.getOperand(0), N1.getOperand(0));
2086      return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, X,
2087                         DAG.getConstant(LHSMask | RHSMask, VT));
2088    }
2089  }
2090
2091  // See if this is some rotate idiom.
2092  if (SDNode *Rot = MatchRotate(N0, N1, N->getDebugLoc()))
2093    return SDValue(Rot, 0);
2094
2095  return SDValue();
2096}
2097
2098/// MatchRotateHalf - Match "(X shl/srl V1) & V2" where V2 may not be present.
2099static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
2100  if (Op.getOpcode() == ISD::AND) {
2101    if (isa<ConstantSDNode>(Op.getOperand(1))) {
2102      Mask = Op.getOperand(1);
2103      Op = Op.getOperand(0);
2104    } else {
2105      return false;
2106    }
2107  }
2108
2109  if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
2110    Shift = Op;
2111    return true;
2112  }
2113
2114  return false;
2115}
2116
2117// MatchRotate - Handle an 'or' of two operands.  If this is one of the many
2118// idioms for rotate, and if the target supports rotation instructions, generate
2119// a rot[lr].
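// e.g. on i32, (or (shl x, 8), (srl x, 24)) becomes (rotl x, 8), or
// equivalently (rotr x, 24), depending on which rotate the target supports.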
2120SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) {
2121  // Must be a legal type.  Expanded 'n promoted things won't work with rotates.
2122  MVT VT = LHS.getValueType();
2123  if (!TLI.isTypeLegal(VT)) return 0;
2124
2125  // The target must have at least one rotate flavor.
2126  bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT);
2127  bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT);
2128  if (!HasROTL && !HasROTR) return 0;
2129
2130  // Match "(X shl/srl V1) & V2" where V2 may not be present.
2131  SDValue LHSShift;   // The shift.
2132  SDValue LHSMask;    // AND value if any.
2133  if (!MatchRotateHalf(LHS, LHSShift, LHSMask))
2134    return 0; // Not part of a rotate.
2135
2136  SDValue RHSShift;   // The shift.
2137  SDValue RHSMask;    // AND value if any.
2138  if (!MatchRotateHalf(RHS, RHSShift, RHSMask))
2139    return 0; // Not part of a rotate.
2140
2141  if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
2142    return 0;   // Not shifting the same value.
2143
2144  if (LHSShift.getOpcode() == RHSShift.getOpcode())
2145    return 0;   // Shifts must disagree.
2146
2147  // Canonicalize shl to left side in a shl/srl pair.
2148  if (RHSShift.getOpcode() == ISD::SHL) {
2149    std::swap(LHS, RHS);
2150    std::swap(LHSShift, RHSShift);
2151    std::swap(LHSMask , RHSMask );
2152  }
2153
2154  unsigned OpSizeInBits = VT.getSizeInBits();
2155  SDValue LHSShiftArg = LHSShift.getOperand(0);
2156  SDValue LHSShiftAmt = LHSShift.getOperand(1);
2157  SDValue RHSShiftAmt = RHSShift.getOperand(1);
2158
2159  // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
2160  // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
2161  if (LHSShiftAmt.getOpcode() == ISD::Constant &&
2162      RHSShiftAmt.getOpcode() == ISD::Constant) {
2163    uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getZExtValue();
2164    uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getZExtValue();
2165    if ((LShVal + RShVal) != OpSizeInBits)
2166      return 0;
2167
2168    SDValue Rot;
2169    if (HasROTL)
2170      Rot = DAG.getNode(ISD::ROTL, DL, VT, LHSShiftArg, LHSShiftAmt);
2171    else
2172      Rot = DAG.getNode(ISD::ROTR, DL, VT, LHSShiftArg, RHSShiftAmt);
2173
2174    // If there is an AND of either shifted operand, apply it to the result.
2175    if (LHSMask.getNode() || RHSMask.getNode()) {
2176      APInt Mask = APInt::getAllOnesValue(OpSizeInBits);
2177
2178      if (LHSMask.getNode()) {
2179        APInt RHSBits = APInt::getLowBitsSet(OpSizeInBits, LShVal);
2180        Mask &= cast<ConstantSDNode>(LHSMask)->getAPIntValue() | RHSBits;
2181      }
2182      if (RHSMask.getNode()) {
2183        APInt LHSBits = APInt::getHighBitsSet(OpSizeInBits, RShVal);
2184        Mask &= cast<ConstantSDNode>(RHSMask)->getAPIntValue() | LHSBits;
2185      }
2186
2187      Rot = DAG.getNode(ISD::AND, DL, VT, Rot, DAG.getConstant(Mask, VT));
2188    }
2189
2190    return Rot.getNode();
2191  }
2192
2193  // If there is a mask here, and we have a variable shift, we can't be sure
2194  // that we're masking out the right bits.
2195  if (LHSMask.getNode() || RHSMask.getNode())
2196    return 0;
2197
2198  // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotl x, y)
2199  // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotr x, (sub 32, y))
2200  if (RHSShiftAmt.getOpcode() == ISD::SUB &&
2201      LHSShiftAmt == RHSShiftAmt.getOperand(1)) {
2202    if (ConstantSDNode *SUBC =
2203          dyn_cast<ConstantSDNode>(RHSShiftAmt.getOperand(0))) {
2204      if (SUBC->getAPIntValue() == OpSizeInBits) {
2205        if (HasROTL)
2206          return DAG.getNode(ISD::ROTL, DL, VT,
2207                             LHSShiftArg, LHSShiftAmt).getNode();
2208        else
2209          return DAG.getNode(ISD::ROTR, DL, VT,
2210                             LHSShiftArg, RHSShiftAmt).getNode();
2211      }
2212    }
2213  }
2214
2215  // fold (or (shl x, (sub 32, y)), (srl x, r)) -> (rotr x, y)
2216  // fold (or (shl x, (sub 32, y)), (srl x, r)) -> (rotl x, (sub 32, y))
2217  if (LHSShiftAmt.getOpcode() == ISD::SUB &&
2218      RHSShiftAmt == LHSShiftAmt.getOperand(1)) {
2219    if (ConstantSDNode *SUBC =
2220          dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0))) {
2221      if (SUBC->getAPIntValue() == OpSizeInBits) {
2222        if (HasROTR)
2223          return DAG.getNode(ISD::ROTR, DL, VT,
2224                             LHSShiftArg, RHSShiftAmt).getNode();
2225        else
2226          return DAG.getNode(ISD::ROTL, DL, VT,
2227                             LHSShiftArg, LHSShiftAmt).getNode();
2228      }
2229    }
2230  }
2231
2232  // Look for sign/zext/any-extended or truncate cases:
2233  if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
2234       || LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
2235       || LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND
2236       || LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
2237      (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
2238       || RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
2239       || RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND
2240       || RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
2241    SDValue LExtOp0 = LHSShiftAmt.getOperand(0);
2242    SDValue RExtOp0 = RHSShiftAmt.getOperand(0);
2243    if (RExtOp0.getOpcode() == ISD::SUB &&
2244        RExtOp0.getOperand(1) == LExtOp0) {
2245      // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
2246      //   (rotl x, y)
2247      // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
2248      //   (rotr x, (sub 32, y))
2249      if (ConstantSDNode *SUBC =
2250            dyn_cast<ConstantSDNode>(RExtOp0.getOperand(0))) {
2251        if (SUBC->getAPIntValue() == OpSizeInBits) {
2252          return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
2253                             LHSShiftArg,
2254                             HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode();
2255        }
2256      }
2257    } else if (LExtOp0.getOpcode() == ISD::SUB &&
2258               RExtOp0 == LExtOp0.getOperand(1)) {
2259      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
2260      //   (rotr x, y)
2261      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
2262      //   (rotl x, (sub 32, y))
2263      if (ConstantSDNode *SUBC =
2264            dyn_cast<ConstantSDNode>(LExtOp0.getOperand(0))) {
2265        if (SUBC->getAPIntValue() == OpSizeInBits) {
2266          return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT,
2267                             LHSShiftArg,
2268                             HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode();
2269        }
2270      }
2271    }
2272  }
2273
2274  return 0;
2275}
2276
2277SDValue DAGCombiner::visitXOR(SDNode *N) {
2278  SDValue N0 = N->getOperand(0);
2279  SDValue N1 = N->getOperand(1);
2280  SDValue LHS, RHS, CC;
2281  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2282  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2283  MVT VT = N0.getValueType();
2284
2285  // fold vector ops
2286  if (VT.isVector()) {
2287    SDValue FoldedVOp = SimplifyVBinOp(N);
2288    if (FoldedVOp.getNode()) return FoldedVOp;
2289  }
2290
2291  // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
2292  if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
2293    return DAG.getConstant(0, VT);
2294  // fold (xor x, undef) -> undef
2295  if (N0.getOpcode() == ISD::UNDEF)
2296    return N0;
2297  if (N1.getOpcode() == ISD::UNDEF)
2298    return N1;
2299  // fold (xor c1, c2) -> c1^c2
2300  if (N0C && N1C)
2301    return DAG.FoldConstantArithmetic(ISD::XOR, VT, N0C, N1C);
2302  // canonicalize constant to RHS
2303  if (N0C && !N1C)
2304    return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0);
2305  // fold (xor x, 0) -> x
2306  if (N1C && N1C->isNullValue())
2307    return N0;
2308  // reassociate xor
2309  SDValue RXOR = ReassociateOps(ISD::XOR, N->getDebugLoc(), N0, N1);
2310  if (RXOR.getNode() != 0)
2311    return RXOR;
2312
2313  // fold !(x cc y) -> (x !cc y)
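  // e.g. (xor (setlt X, Y), 1) -> (setge X, Y) for integer operands.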
2314  if (N1C && N1C->getAPIntValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) {
2315    bool isInt = LHS.getValueType().isInteger();
2316    ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
2317                                               isInt);
2318
2319    if (!LegalOperations || TLI.isCondCodeLegal(NotCC, LHS.getValueType())) {
2320      switch (N0.getOpcode()) {
2321      default:
2322        assert(0 && "Unhandled SetCC Equivalent!");
2323        abort();
2324      case ISD::SETCC:
2325        return DAG.getSetCC(N->getDebugLoc(), VT, LHS, RHS, NotCC);
2326      case ISD::SELECT_CC:
2327        return DAG.getSelectCC(N->getDebugLoc(), LHS, RHS, N0.getOperand(2),
2328                               N0.getOperand(3), NotCC);
2329      }
2330    }
2331  }
2332
2333  // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
2334  if (N1C && N1C->getAPIntValue() == 1 && N0.getOpcode() == ISD::ZERO_EXTEND &&
2335      N0.getNode()->hasOneUse() &&
2336      isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
2337    SDValue V = N0.getOperand(0);
2338    V = DAG.getNode(ISD::XOR, N0.getDebugLoc(), V.getValueType(), V,
2339                    DAG.getConstant(1, V.getValueType()));
2340    AddToWorkList(V.getNode());
2341    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, V);
2342  }
2343
2344  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
2345  if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 &&
2346      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
2347    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
2348    if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
2349      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
2350      LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS
2351      RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS
2352      AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
2353      return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS);
2354    }
2355  }
2356  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
2357  if (N1C && N1C->isAllOnesValue() &&
2358      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
2359    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
2360    if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
2361      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
2362      LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS
2363      RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS
2364      AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
2365      return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS);
2366    }
2367  }
2368  // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
2369  if (N1C && N0.getOpcode() == ISD::XOR) {
2370    ConstantSDNode *N00C = dyn_cast<ConstantSDNode>(N0.getOperand(0));
2371    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2372    if (N00C)
2373      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(1),
2374                         DAG.getConstant(N1C->getAPIntValue() ^
2375                                         N00C->getAPIntValue(), VT));
2376    if (N01C)
2377      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(0),
2378                         DAG.getConstant(N1C->getAPIntValue() ^
2379                                         N01C->getAPIntValue(), VT));
2380  }
2381  // fold (xor x, x) -> 0
2382  if (N0 == N1) {
2383    if (!VT.isVector()) {
2384      return DAG.getConstant(0, VT);
2385    } else if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)){
2386      // Produce a vector of zeros.
2387      SDValue El = DAG.getConstant(0, VT.getVectorElementType());
2388      std::vector<SDValue> Ops(VT.getVectorNumElements(), El);
2389      return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
2390                         &Ops[0], Ops.size());
2391    }
2392  }
2393
2394  // Simplify: xor (op x...), (op y...)  -> (op (xor x, y))
2395  if (N0.getOpcode() == N1.getOpcode()) {
2396    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
2397    if (Tmp.getNode()) return Tmp;
2398  }
2399
2400  // Simplify the expression using non-local knowledge.
2401  if (!VT.isVector() &&
2402      SimplifyDemandedBits(SDValue(N, 0)))
2403    return SDValue(N, 0);
2404
2405  return SDValue();
2406}
2407
2408/// visitShiftByConstant - Handle transforms common to the three shifts, when
2409/// the shift amount is a constant.
2410SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
2411  SDNode *LHS = N->getOperand(0).getNode();
2412  if (!LHS->hasOneUse()) return SDValue();
2413
2414  // We want to pull some binops through shifts, so that we have (and (shift))
2415  // instead of (shift (and)), likewise for add, or, xor, etc.  This sort of
2416  // thing happens with address calculations, so it's important to canonicalize
2417  // it.
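  // e.g. (shl (add (shl x, 1), 5), 2) -> (add (shl (shl x, 1), 2), 20); the
  // inner shl-of-shl then folds further on a later visit.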
2418  bool HighBitSet = false;  // Can we transform this if the high bit is set?
2419
2420  switch (LHS->getOpcode()) {
2421  default: return SDValue();
2422  case ISD::OR:
2423  case ISD::XOR:
2424    HighBitSet = false; // We can only transform sra if the high bit is clear.
2425    break;
2426  case ISD::AND:
2427    HighBitSet = true;  // We can only transform sra if the high bit is set.
2428    break;
2429  case ISD::ADD:
2430    if (N->getOpcode() != ISD::SHL)
2431      return SDValue(); // only shl(add) not sr[al](add).
2432    HighBitSet = false; // We can only transform sra if the high bit is clear.
2433    break;
2434  }
2435
2436  // We require the RHS of the binop to be a constant as well.
2437  ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
2438  if (!BinOpCst) return SDValue();
2439
2440  // FIXME: disable this unless the input to the binop is a shift by a constant.
2441  // If it is not a shift, it pessimizes some common cases like:
2442  //
2443  //    void foo(int *X, int i) { X[i & 1235] = 1; }
2444  //    int bar(int *X, int i) { return X[i & 255]; }
2445  SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
2446  if ((BinOpLHSVal->getOpcode() != ISD::SHL &&
2447       BinOpLHSVal->getOpcode() != ISD::SRA &&
2448       BinOpLHSVal->getOpcode() != ISD::SRL) ||
2449      !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
2450    return SDValue();
2451
2452  MVT VT = N->getValueType(0);
2453
2454  // If this is a signed shift right, and the high bit is modified by the
2455  // logical operation, do not perform the transformation. The HighBitSet
2456  // boolean indicates the value of the high bit of the constant which would
2457  // cause it to be modified for this operation.
2458  if (N->getOpcode() == ISD::SRA) {
2459    bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
2460    if (BinOpRHSSignSet != HighBitSet)
2461      return SDValue();
2462  }
2463
2464  // Fold the constants, shifting the binop RHS by the shift amount.
2465  SDValue NewRHS = DAG.getNode(N->getOpcode(), LHS->getOperand(1).getDebugLoc(),
2466                               N->getValueType(0),
2467                               LHS->getOperand(1), N->getOperand(1));
2468
2469  // Create the new shift.
2470  SDValue NewShift = DAG.getNode(N->getOpcode(), LHS->getOperand(0).getDebugLoc(),
2471                                 VT, LHS->getOperand(0), N->getOperand(1));
2472
2473  // Create the new binop.
2474  return DAG.getNode(LHS->getOpcode(), N->getDebugLoc(), VT, NewShift, NewRHS);
2475}
2476
2477SDValue DAGCombiner::visitSHL(SDNode *N) {
2478  SDValue N0 = N->getOperand(0);
2479  SDValue N1 = N->getOperand(1);
2480  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2481  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2482  MVT VT = N0.getValueType();
2483  unsigned OpSizeInBits = VT.getSizeInBits();
2484
2485  // fold (shl c1, c2) -> c1<<c2
2486  if (N0C && N1C)
2487    return DAG.FoldConstantArithmetic(ISD::SHL, VT, N0C, N1C);
2488  // fold (shl 0, x) -> 0
2489  if (N0C && N0C->isNullValue())
2490    return N0;
2491  // fold (shl x, c >= size(x)) -> undef
2492  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
2493    return DAG.getNode(ISD::UNDEF, N->getDebugLoc(), VT);
2494  // fold (shl x, 0) -> x
2495  if (N1C && N1C->isNullValue())
2496    return N0;
2497  // if (shl x, c) is known to be zero, return 0
2498  if (DAG.MaskedValueIsZero(SDValue(N, 0),
2499                            APInt::getAllOnesValue(VT.getSizeInBits())))
2500    return DAG.getConstant(0, VT);
2501  // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), c))
2502  // iff (trunc c) == c
2503  if (N1.getOpcode() == ISD::TRUNCATE &&
2504      N1.getOperand(0).getOpcode() == ISD::AND &&
2505      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
2506    SDValue N101 = N1.getOperand(0).getOperand(1);
2507    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
2508      MVT TruncVT = N1.getValueType();
2509      SDValue N100 = N1.getOperand(0).getOperand(0);
2510      uint64_t TruncC = TruncVT.getIntegerVTBitMask() &
2511                        N101C->getZExtValue();
2512      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
2513                         DAG.getNode(ISD::AND, N->getDebugLoc(), TruncVT,
2514                                     DAG.getNode(ISD::TRUNCATE,
2515                                                 N->getDebugLoc(),
2516                                                 TruncVT, N100),
2517                                     DAG.getConstant(TruncC, TruncVT)));
2518    }
2519  }
2520
2521  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
2522    return SDValue(N, 0);
2523
2524  // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
2525  if (N1C && N0.getOpcode() == ISD::SHL &&
2526      N0.getOperand(1).getOpcode() == ISD::Constant) {
2527    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
2528    uint64_t c2 = N1C->getZExtValue();
2529    if (c1 + c2 > OpSizeInBits)
2530      return DAG.getConstant(0, VT);
2531    return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0.getOperand(0),
2532                       DAG.getConstant(c1 + c2, N1.getValueType()));
2533  }
2534  // fold (shl (srl x, c1), c2) -> (shl (and x, (shl -1, c1)), (sub c2, c1)) or
2535  //                               (srl (and x, (shl -1, c1)), (sub c1, c2))
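  // e.g. on i32: (shl (srl x, 4), 6) -> (shl (and x, 0xFFFFFFF0), 2)
  //              (shl (srl x, 6), 4) -> (srl (and x, 0xFFFFFFC0), 2)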
2536  if (N1C && N0.getOpcode() == ISD::SRL &&
2537      N0.getOperand(1).getOpcode() == ISD::Constant) {
2538    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
2539    uint64_t c2 = N1C->getZExtValue();
2540    SDValue Mask = DAG.getNode(ISD::AND, N0.getDebugLoc(), VT, N0.getOperand(0),
2541                               DAG.getConstant(~0ULL << c1, VT));
2542    if (c2 > c1)
2543      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, Mask,
2544                         DAG.getConstant(c2-c1, N1.getValueType()));
2545    else
2546      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, Mask,
2547                         DAG.getConstant(c1-c2, N1.getValueType()));
2548  }
2549  // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
2550  if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1))
2551    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
2552                       DAG.getConstant(~0ULL << N1C->getZExtValue(), VT));
2553
2554  return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue();
2555}
2556
2557SDValue DAGCombiner::visitSRA(SDNode *N) {
2558  SDValue N0 = N->getOperand(0);
2559  SDValue N1 = N->getOperand(1);
2560  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2561  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2562  MVT VT = N0.getValueType();
2563
2564  // fold (sra c1, c2) -> c1>>c2
2565  if (N0C && N1C)
2566    return DAG.FoldConstantArithmetic(ISD::SRA, VT, N0C, N1C);
2567  // fold (sra 0, x) -> 0
2568  if (N0C && N0C->isNullValue())
2569    return N0;
2570  // fold (sra -1, x) -> -1
2571  if (N0C && N0C->isAllOnesValue())
2572    return N0;
2573  // fold (sra x, c >= size(x)) -> undef
2574  if (N1C && N1C->getZExtValue() >= VT.getSizeInBits())
2575    return DAG.getNode(ISD::UNDEF, N->getDebugLoc(), VT);
2576  // fold (sra x, 0) -> x
2577  if (N1C && N1C->isNullValue())
2578    return N0;
2579  // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
2580  // sext_inreg.
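  // e.g. on i32, (sra (shl x, 24), 24) -> (sign_extend_inreg x, i8)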
2581  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
2582    unsigned LowBits = VT.getSizeInBits() - (unsigned)N1C->getZExtValue();
2583    MVT EVT = MVT::getIntegerVT(LowBits);
2584    if ((!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, EVT)))
2585      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
2586                         N0.getOperand(0), DAG.getValueType(EVT));
2587  }
2588
2589  // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
2590  if (N1C && N0.getOpcode() == ISD::SRA) {
2591    if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2592      unsigned Sum = N1C->getZExtValue() + C1->getZExtValue();
2593      if (Sum >= VT.getSizeInBits()) Sum = VT.getSizeInBits()-1;
2594      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0.getOperand(0),
2595                         DAG.getConstant(Sum, N1C->getValueType(0)));
2596    }
2597  }
2598
2599  // fold (sra (shl X, m), (sub result_size, n))
2600  // -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
2601  // result_size - n != m.
2602  // If truncate is free for the target sext(shl) is likely to result in better
2603  // code.
2604  if (N0.getOpcode() == ISD::SHL) {
2605    // Get the two constants of the shifts: N01C == m and N1C == n.
2606    const ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2607    if (N01C && N1C) {
2608      // Determine what the truncate's result bitsize and type would be.
2609      unsigned VTValSize = VT.getSizeInBits();
2610      MVT TruncVT =
2611        MVT::getIntegerVT(VTValSize - N1C->getZExtValue());
2612      // Determine the residual right-shift amount.
2613      unsigned ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
2614
2615      // If the shift is not a no-op (in which case this should be just a sign
2616      // extend already), the truncated-to type is legal, sign_extend is legal
2617      // on that type, and the truncate to that type is both legal and free,
2618      // perform the transform.
2619      if (ShiftAmt &&
2620          TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
2621          TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
2622          TLI.isTruncateFree(VT, TruncVT)) {
2623
2624          SDValue Amt = DAG.getConstant(ShiftAmt, getShiftAmountTy());
2625          SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT,
2626                                      N0.getOperand(0), Amt);
2627          SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), TruncVT,
2628                                      Shift);
2629          return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(),
2630                             N->getValueType(0), Trunc);
2631      }
2632    }
2633  }
2634
2635  // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), c))
2636  // iff (trunc c) == c
2637  if (N1.getOpcode() == ISD::TRUNCATE &&
2638      N1.getOperand(0).getOpcode() == ISD::AND &&
2639      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
2640    SDValue N101 = N1.getOperand(0).getOperand(1);
2641    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
2642      MVT TruncVT = N1.getValueType();
2643      SDValue N100 = N1.getOperand(0).getOperand(0);
2644      uint64_t TruncC = TruncVT.getIntegerVTBitMask() &
2645                        N101C->getZExtValue();
2646      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
2647                         DAG.getNode(ISD::AND, N->getDebugLoc(),
2648                                     TruncVT,
2649                                     DAG.getNode(ISD::TRUNCATE,
2650                                                 N->getDebugLoc(),
2651                                                 TruncVT, N100),
2652                                     DAG.getConstant(TruncC, TruncVT)));
2653    }
2654  }
2655
2656  // Simplify, based on bits shifted out of the LHS.
2657  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
2658    return SDValue(N, 0);
2659
2660
2661  // If the sign bit is known to be zero, switch this to a SRL.
2662  if (DAG.SignBitIsZero(N0))
2663    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, N1);
2664
2665  return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue();
2666}
2667
2668SDValue DAGCombiner::visitSRL(SDNode *N) {
2669  SDValue N0 = N->getOperand(0);
2670  SDValue N1 = N->getOperand(1);
2671  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2672  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2673  MVT VT = N0.getValueType();
2674  unsigned OpSizeInBits = VT.getSizeInBits();
2675
2676  // fold (srl c1, c2) -> c1 >>u c2
2677  if (N0C && N1C)
2678    return DAG.FoldConstantArithmetic(ISD::SRL, VT, N0C, N1C);
2679  // fold (srl 0, x) -> 0
2680  if (N0C && N0C->isNullValue())
2681    return N0;
2682  // fold (srl x, c >= size(x)) -> undef
2683  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
2684    return DAG.getNode(ISD::UNDEF, N->getDebugLoc(), VT);
2685  // fold (srl x, 0) -> x
2686  if (N1C && N1C->isNullValue())
2687    return N0;
2688  // if (srl x, c) is known to be zero, return 0
2689  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
2690                                   APInt::getAllOnesValue(OpSizeInBits)))
2691    return DAG.getConstant(0, VT);
2692
2693  // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
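  // e.g. (srl (srl x, 2), 3) -> (srl x, 5).  If c1+c2 covers every bit of x,
  // all bits have been shifted out and the result is known to be zero.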
2694  if (N1C && N0.getOpcode() == ISD::SRL &&
2695      N0.getOperand(1).getOpcode() == ISD::Constant) {
2696    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
2697    uint64_t c2 = N1C->getZExtValue();
2698    if (c1 + c2 >= OpSizeInBits)
2699      return DAG.getConstant(0, VT);
2700    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
2701                       DAG.getConstant(c1 + c2, N1.getValueType()));
2702  }
2703
2704  // fold (srl (anyextend x), c) -> (anyextend (srl x, c))
2705  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
2706    // Shifting in all undef bits?
2707    MVT SmallVT = N0.getOperand(0).getValueType();
2708    if (N1C->getZExtValue() >= SmallVT.getSizeInBits())
2709      return DAG.getNode(ISD::UNDEF, N->getDebugLoc(), VT);
2710
2711    SDValue SmallShift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), SmallVT,
2712                                     N0.getOperand(0), N1);
2713    AddToWorkList(SmallShift.getNode());
2714    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, SmallShift);
2715  }
2716
2717  // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
2718  // bit, which is unmodified by sra.
2719  if (N1C && N1C->getZExtValue() + 1 == VT.getSizeInBits()) {
2720    if (N0.getOpcode() == ISD::SRA)
2721      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0), N1);
2722  }
2723
2724  // fold (srl (ctlz x), "5") -> (xor x, 1) iff x has at most the low bit set.
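  // e.g. for i32, ctlz yields 32 only when its input is zero, so
  // (srl (ctlz x), 5) computes "x == 0"; when just one bit of x can be
  // nonzero this is equivalent to (xor (srl x, bitpos), 1), handled below.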
2725  if (N1C && N0.getOpcode() == ISD::CTLZ &&
2726      N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
2727    APInt KnownZero, KnownOne;
2728    APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits());
2729    DAG.ComputeMaskedBits(N0.getOperand(0), Mask, KnownZero, KnownOne);
2730
2731    // If any of the input bits are KnownOne, then the input couldn't be all
2732    // zeros, thus the result of the srl will always be zero.
2733    if (KnownOne.getBoolValue()) return DAG.getConstant(0, VT);
2734
2735    // If all of the bits input to the ctlz node are known to be zero, then
2736    // the result of the ctlz is "32" and the result of the shift is one.
2737    APInt UnknownBits = ~KnownZero & Mask;
2738    if (UnknownBits == 0) return DAG.getConstant(1, VT);
2739
2740    // Otherwise, check to see if there is exactly one bit input to the ctlz.
2741    if ((UnknownBits & (UnknownBits - 1)) == 0) {
2742      // Okay, we know that only the single bit specified by UnknownBits
2743      // could be set on input to the CTLZ node. If this bit is set, the SRL
2744      // will return 0; if it is clear, it returns 1. Change the CTLZ/SRL pair
2745      // to an SRL/XOR pair, which is likely to simplify more.
2746      unsigned ShAmt = UnknownBits.countTrailingZeros();
2747      SDValue Op = N0.getOperand(0);
2748
2749      if (ShAmt) {
2750        Op = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT, Op,
2751                         DAG.getConstant(ShAmt, getShiftAmountTy()));
2752        AddToWorkList(Op.getNode());
2753      }
2754
2755      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
2756                         Op, DAG.getConstant(1, VT));
2757    }
2758  }
2759
2760  // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), c))
2761  // iff (trunc c) == c
2762  if (N1.getOpcode() == ISD::TRUNCATE &&
2763      N1.getOperand(0).getOpcode() == ISD::AND &&
2764      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
2765    SDValue N101 = N1.getOperand(0).getOperand(1);
2766    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
2767      MVT TruncVT = N1.getValueType();
2768      SDValue N100 = N1.getOperand(0).getOperand(0);
2769      uint64_t TruncC = TruncVT.getIntegerVTBitMask() &
2770                        N101C->getZExtValue();
2771      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
2772                         DAG.getNode(ISD::AND, N->getDebugLoc(),
2773                                     TruncVT,
2774                                     DAG.getNode(ISD::TRUNCATE,
2775                                                 N->getDebugLoc(),
2776                                                 TruncVT, N100),
2777                                     DAG.getConstant(TruncC, TruncVT)));
2778    }
2779  }
2780
2781  // fold operands of srl based on knowledge that the low bits are not
2782  // demanded.
2783  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
2784    return SDValue(N, 0);
2785
2786  return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue();
2787}
2788
2789SDValue DAGCombiner::visitCTLZ(SDNode *N) {
2790  SDValue N0 = N->getOperand(0);
2791  MVT VT = N->getValueType(0);
2792
2793  // fold (ctlz c1) -> c2
2794  if (isa<ConstantSDNode>(N0))
2795    return DAG.getNode(ISD::CTLZ, N->getDebugLoc(), VT, N0);
2796  return SDValue();
2797}
2798
2799SDValue DAGCombiner::visitCTTZ(SDNode *N) {
2800  SDValue N0 = N->getOperand(0);
2801  MVT VT = N->getValueType(0);
2802
2803  // fold (cttz c1) -> c2
2804  if (isa<ConstantSDNode>(N0))
2805    return DAG.getNode(ISD::CTTZ, N->getDebugLoc(), VT, N0);
2806  return SDValue();
2807}
2808
2809SDValue DAGCombiner::visitCTPOP(SDNode *N) {
2810  SDValue N0 = N->getOperand(0);
2811  MVT VT = N->getValueType(0);
2812
2813  // fold (ctpop c1) -> c2
2814  if (isa<ConstantSDNode>(N0))
2815    return DAG.getNode(ISD::CTPOP, N->getDebugLoc(), VT, N0);
2816  return SDValue();
2817}
2818
2819SDValue DAGCombiner::visitSELECT(SDNode *N) {
2820  SDValue N0 = N->getOperand(0);
2821  SDValue N1 = N->getOperand(1);
2822  SDValue N2 = N->getOperand(2);
2823  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2824  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2825  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2826  MVT VT = N->getValueType(0);
2827  MVT VT0 = N0.getValueType();
2828
2829  // fold (select C, X, X) -> X
2830  if (N1 == N2)
2831    return N1;
2832  // fold (select true, X, Y) -> X
2833  if (N0C && !N0C->isNullValue())
2834    return N1;
2835  // fold (select false, X, Y) -> Y
2836  if (N0C && N0C->isNullValue())
2837    return N2;
2838  // fold (select C, 1, X) -> (or C, X)
2839  if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
2840    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
2841  // fold (select C, 0, 1) -> (xor C, 1)
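  // e.g. an i1 (select C, 0, 1) is simply (xor C, 1); when the result type is
  // wider than the condition type, the xor is done in the condition type and
  // then zero extended or truncated to the result type.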
2842  if (VT.isInteger() &&
2843      (VT0 == MVT::i1 ||
2844       (VT0.isInteger() &&
2845        TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent)) &&
2846      N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
2847    SDValue XORNode;
2848    if (VT == VT0)
2849      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT0,
2850                         N0, DAG.getConstant(1, VT0));
2851    XORNode = DAG.getNode(ISD::XOR, N0.getDebugLoc(), VT0,
2852                          N0, DAG.getConstant(1, VT0));
2853    AddToWorkList(XORNode.getNode());
2854    if (VT.bitsGT(VT0))
2855      return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, XORNode);
2856    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, XORNode);
2857  }
2858  // fold (select C, 0, X) -> (and (not C), X)
2859  if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) {
2860    SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
2861    AddToWorkList(NOTNode.getNode());
2862    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, NOTNode, N2);
2863  }
2864  // fold (select C, X, 1) -> (or (not C), X)
2865  if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) {
2866    SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
2867    AddToWorkList(NOTNode.getNode());
2868    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, NOTNode, N1);
2869  }
2870  // fold (select C, X, 0) -> (and C, X)
2871  if (VT == MVT::i1 && N2C && N2C->isNullValue())
2872    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);
2873  // fold (select X, X, Y) -> (or X, Y)
2874  // fold (select X, 1, Y) -> (or X, Y)
2875  if (VT == MVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1)))
2876    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
2877  // fold (select X, Y, X) -> (and X, Y)
2878  // fold (select X, Y, 0) -> (and X, Y)
2879  if (VT == MVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0)))
2880    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);
2881
2882  // If we can fold this based on the true/false value, do so.
2883  if (SimplifySelectOps(N, N1, N2))
2884    return SDValue(N, 0);  // Don't revisit N.
2885
2886  // fold selects based on a setcc into other things, such as min/max/abs
2887  if (N0.getOpcode() == ISD::SETCC) {
2888    // FIXME:
2889    // Check against MVT::Other for SELECT_CC, which is a workaround for targets
2890    // having to say they don't support SELECT_CC on every type the DAG knows
2891    // about, since there is no way to mark an opcode illegal at all value types
2892    if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other))
2893      return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT,
2894                         N0.getOperand(0), N0.getOperand(1),
2895                         N1, N2, N0.getOperand(2));
2896    else
2897      return SimplifySelect(N->getDebugLoc(), N0, N1, N2);
2898  }
2899
2900  return SDValue();
2901}
2902
2903SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
2904  SDValue N0 = N->getOperand(0);
2905  SDValue N1 = N->getOperand(1);
2906  SDValue N2 = N->getOperand(2);
2907  SDValue N3 = N->getOperand(3);
2908  SDValue N4 = N->getOperand(4);
2909  ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
2910
2911  // fold select_cc lhs, rhs, x, x, cc -> x
2912  if (N2 == N3)
2913    return N2;
2914
2915  // Determine if the condition we're dealing with is constant
2916  SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()),
2917                              N0, N1, CC, false);
2918  if (SCC.getNode()) AddToWorkList(SCC.getNode());
2919
2920  if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
2921    if (!SCCC->isNullValue())
2922      return N2;    // cond always true -> true val
2923    else
2924      return N3;    // cond always false -> false val
2925  }
2926
2927  // Fold to a simpler select_cc
2928  if (SCC.getNode() && SCC.getOpcode() == ISD::SETCC)
2929    return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N2.getValueType(),
2930                       SCC.getOperand(0), SCC.getOperand(1), N2, N3,
2931                       SCC.getOperand(2));
2932
2933  // If we can fold this based on the true/false value, do so.
2934  if (SimplifySelectOps(N, N2, N3))
2935    return SDValue(N, 0);  // Don't revisit N.
2936
2937  // fold select_cc into other things, such as min/max/abs
2938  return SimplifySelectCC(N->getDebugLoc(), N0, N1, N2, N3, CC);
2939}
2940
2941SDValue DAGCombiner::visitSETCC(SDNode *N) {
2942  return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
2943                       cast<CondCodeSDNode>(N->getOperand(2))->get());
2944}
2945
2946// ExtendUsesToFormExtLoad - Try to extend the uses of a load to enable the
2947// "fold ({s|z}ext (load x)) -> ({s|z}ext (truncate ({s|z}extload x)))"
2948// transformation. Returns true if the extensions are possible and the
2949// above-mentioned transformation is profitable.
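// For example, if (load x) is used both by this extend and by a
// (setcc (load x), 0), the setcc operand can be rewritten to use the extended
// load as well, so the original narrow value need not be kept live.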
2950static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
2951                                    unsigned ExtOpc,
2952                                    SmallVector<SDNode*, 4> &ExtendNodes,
2953                                    const TargetLowering &TLI) {
2954  bool HasCopyToRegUses = false;
2955  bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
2956  for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
2957                            UE = N0.getNode()->use_end();
2958       UI != UE; ++UI) {
2959    SDNode *User = *UI;
2960    if (User == N)
2961      continue;
2962    // FIXME: Only extend SETCC N, N and SETCC N, c for now.
2963    if (User->getOpcode() == ISD::SETCC) {
2964      ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
2965      if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
2966        // Sign bits will be lost after a zext.
2967        return false;
2968      bool Add = false;
2969      for (unsigned i = 0; i != 2; ++i) {
2970        SDValue UseOp = User->getOperand(i);
2971        if (UseOp == N0)
2972          continue;
2973        if (!isa<ConstantSDNode>(UseOp))
2974          return false;
2975        Add = true;
2976      }
2977      if (Add)
2978        ExtendNodes.push_back(User);
2979    } else {
2980      for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
2981        SDValue UseOp = User->getOperand(i);
2982        if (UseOp == N0) {
2983          // If truncate from extended type to original load type is free
2984          // on this target, then it's ok to extend a CopyToReg.
2985          if (isTruncFree && User->getOpcode() == ISD::CopyToReg)
2986            HasCopyToRegUses = true;
2987          else
2988            return false;
2989        }
2990      }
2991    }
2992  }
2993
2994  if (HasCopyToRegUses) {
2995    bool BothLiveOut = false;
2996    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
2997         UI != UE; ++UI) {
2998      SDNode *User = *UI;
2999      for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
3000        SDValue UseOp = User->getOperand(i);
3001        if (UseOp.getNode() == N && UseOp.getResNo() == 0) {
3002          BothLiveOut = true;
3003          break;
3004        }
3005      }
3006    }
3007    if (BothLiveOut)
3008      // Both unextended and extended values are live out. There had better be
3009      // a good reason for the transformation.
3010      return !ExtendNodes.empty();
3011  }
3012  return true;
3013}
3014
3015SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
3016  SDValue N0 = N->getOperand(0);
3017  MVT VT = N->getValueType(0);
3018
3019  // fold (sext c1) -> c1
3020  if (isa<ConstantSDNode>(N0))
3021    return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N0);
3022
3023  // fold (sext (sext x)) -> (sext x)
3024  // fold (sext (aext x)) -> (sext x)
3025  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
3026    return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT,
3027                       N0.getOperand(0));
3028
3029  if (N0.getOpcode() == ISD::TRUNCATE) {
3030    // fold (sext (truncate (load x))) -> (sext (smaller load x))
3031    // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
3032    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
3033    if (NarrowLoad.getNode()) {
3034      if (NarrowLoad.getNode() != N0.getNode())
3035        CombineTo(N0.getNode(), NarrowLoad);
3036      return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, NarrowLoad);
3037    }
3038
3039    // See if the value being truncated is already sign extended.  If so, just
3040    // eliminate the trunc/sext pair.
3041    SDValue Op = N0.getOperand(0);
3042    unsigned OpBits   = Op.getValueType().getSizeInBits();
3043    unsigned MidBits  = N0.getValueType().getSizeInBits();
3044    unsigned DestBits = VT.getSizeInBits();
3045    unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
3046
3047    if (OpBits == DestBits) {
3048      // Op is i32, Mid is i8, and Dest is i32.  If Op has more than 24 sign
3049      // bits, it is already sign extended and we can return it directly.
3050      if (NumSignBits > DestBits-MidBits)
3051        return Op;
3052    } else if (OpBits < DestBits) {
3053      // Op is i32, Mid is i8, and Dest is i64.  If Op has more than 24 sign
3054      // bits, just sext from i32.
3055      if (NumSignBits > OpBits-MidBits)
3056        return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, Op);
3057    } else {
3058      // Op is i64, Mid is i8, and Dest is i32.  If Op has more than 56 sign
3059      // bits, just truncate to i32.
3060      if (NumSignBits > OpBits-MidBits)
3061        return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);
3062    }
3063
3064    // fold (sext (truncate x)) -> (sextinreg x).
3065    if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
3066                                                 N0.getValueType())) {
3067      if (Op.getValueType().bitsLT(VT))
3068        Op = DAG.getNode(ISD::ANY_EXTEND, N0.getDebugLoc(), VT, Op);
3069      else if (Op.getValueType().bitsGT(VT))
3070        Op = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), VT, Op);
3071      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, Op,
3072                         DAG.getValueType(N0.getValueType()));
3073    }
3074  }
3075
3076  // fold (sext (load x)) -> (sext (truncate (sextload x)))
3077  if (ISD::isNON_EXTLoad(N0.getNode()) &&
3078      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3079       TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) {
3080    bool DoXform = true;
3081    SmallVector<SDNode*, 4> SetCCs;
3082    if (!N0.hasOneUse())
3083      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI);
3084    if (DoXform) {
3085      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3086      SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(),
3087                                       VT, LN0->getChain(),
3088                                       LN0->getBasePtr(), LN0->getSrcValue(),
3089                                       LN0->getSrcValueOffset(),
3090                                       N0.getValueType(),
3091                                       LN0->isVolatile(), LN0->getAlignment());
3092      CombineTo(N, ExtLoad);
3093      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3094                                  N0.getValueType(), ExtLoad);
3095      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
3096
3097      // Extend SetCC uses if necessary.
3098      for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
3099        SDNode *SetCC = SetCCs[i];
3100        SmallVector<SDValue, 4> Ops;
3101
3102        for (unsigned j = 0; j != 2; ++j) {
3103          SDValue SOp = SetCC->getOperand(j);
3104          if (SOp == Trunc)
3105            Ops.push_back(ExtLoad);
3106          else
3107            Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(),
3108                                      VT, SOp));
3109        }
3110
3111        Ops.push_back(SetCC->getOperand(2));
3112        CombineTo(SetCC, DAG.getNode(ISD::SETCC, N->getDebugLoc(),
3113                                     SetCC->getValueType(0),
3114                                     &Ops[0], Ops.size()));
3115      }
3116
3117      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3118    }
3119  }
3120
3121  // fold (sext (sextload x)) -> (sext (truncate (sextload x)))
3122  // fold (sext ( extload x)) -> (sext (truncate (sextload x)))
3123  if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
3124      ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
3125    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3126    MVT EVT = LN0->getMemoryVT();
3127    if ((!LegalOperations && !LN0->isVolatile()) ||
3128        TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT)) {
3129      SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
3130                                       LN0->getChain(),
3131                                       LN0->getBasePtr(), LN0->getSrcValue(),
3132                                       LN0->getSrcValueOffset(), EVT,
3133                                       LN0->isVolatile(), LN0->getAlignment());
3134      CombineTo(N, ExtLoad);
3135      CombineTo(N0.getNode(),
3136                DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3137                            N0.getValueType(), ExtLoad),
3138                ExtLoad.getValue(1));
3139      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3140    }
3141  }
3142
3143  // sext(setcc x, y, cc) -> (select_cc x, y, -1, 0, cc)
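  // e.g. (sext (setcc a, b, setlt)) : i32 becomes
  // (select_cc a, b, -1, 0, setlt), i.e. all ones when the comparison holds.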
3144  if (N0.getOpcode() == ISD::SETCC) {
3145    SDValue SCC =
3146      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
3147                       DAG.getConstant(~0ULL, VT), DAG.getConstant(0, VT),
3148                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
3149    if (SCC.getNode()) return SCC;
3150  }
3151
3152  // fold (sext x) -> (zext x) if the sign bit is known zero.
3153  if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
3154      DAG.SignBitIsZero(N0))
3155    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0);
3156
3157  return SDValue();
3158}
3159
3160SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
3161  SDValue N0 = N->getOperand(0);
3162  MVT VT = N->getValueType(0);
3163
3164  // fold (zext c1) -> c1
3165  if (isa<ConstantSDNode>(N0))
3166    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0);
3167  // fold (zext (zext x)) -> (zext x)
3168  // fold (zext (aext x)) -> (zext x)
3169  if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
3170    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT,
3171                       N0.getOperand(0));
3172
3173  // fold (zext (truncate (load x))) -> (zext (smaller load x))
3174  // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n)))
3175  if (N0.getOpcode() == ISD::TRUNCATE) {
3176    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
3177    if (NarrowLoad.getNode()) {
3178      if (NarrowLoad.getNode() != N0.getNode())
3179        CombineTo(N0.getNode(), NarrowLoad);
3180      return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, NarrowLoad);
3181    }
3182  }
3183
3184  // fold (zext (truncate x)) -> (and x, mask)
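  // e.g. (zext (trunc x:i32 to i8) to i32) -> (and x, 255), keeping only the
  // bits that survived the truncate.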
3185  if (N0.getOpcode() == ISD::TRUNCATE &&
3186      (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
3187    SDValue Op = N0.getOperand(0);
3188    if (Op.getValueType().bitsLT(VT)) {
3189      Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op);
3190    } else if (Op.getValueType().bitsGT(VT)) {
3191      Op = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);
3192    }
3193    return DAG.getZeroExtendInReg(Op, N->getDebugLoc(), N0.getValueType());
3194  }
3195
3196  // fold (zext (and (trunc x), cst)) -> (and x, cst).
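  // e.g. (zext (and (trunc x:i64 to i32), 7) to i64) -> (and x, 7), with the
  // mask zero extended to the wider type.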
3197  if (N0.getOpcode() == ISD::AND &&
3198      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
3199      N0.getOperand(1).getOpcode() == ISD::Constant) {
3200    SDValue X = N0.getOperand(0).getOperand(0);
3201    if (X.getValueType().bitsLT(VT)) {
3202      X = DAG.getNode(ISD::ANY_EXTEND, X.getDebugLoc(), VT, X);
3203    } else if (X.getValueType().bitsGT(VT)) {
3204      X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
3205    }
3206    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
3207    Mask.zext(VT.getSizeInBits());
3208    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3209                       X, DAG.getConstant(Mask, VT));
3210  }
3211
3212  // fold (zext (load x)) -> (zext (truncate (zextload x)))
3213  if (ISD::isNON_EXTLoad(N0.getNode()) &&
3214      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3215       TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
3216    bool DoXform = true;
3217    SmallVector<SDNode*, 4> SetCCs;
3218    if (!N0.hasOneUse())
3219      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI);
3220    if (DoXform) {
3221      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3222      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
3223                                       LN0->getChain(),
3224                                       LN0->getBasePtr(), LN0->getSrcValue(),
3225                                       LN0->getSrcValueOffset(),
3226                                       N0.getValueType(),
3227                                       LN0->isVolatile(), LN0->getAlignment());
3228      CombineTo(N, ExtLoad);
3229      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3230                                  N0.getValueType(), ExtLoad);
3231      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
3232
3233      // Extend SetCC uses if necessary.
3234      for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
3235        SDNode *SetCC = SetCCs[i];
3236        SmallVector<SDValue, 4> Ops;
3237
3238        for (unsigned j = 0; j != 2; ++j) {
3239          SDValue SOp = SetCC->getOperand(j);
3240          if (SOp == Trunc)
3241            Ops.push_back(ExtLoad);
3242          else
3243            Ops.push_back(DAG.getNode(ISD::ZERO_EXTEND,
3244                                      N->getDebugLoc(), VT, SOp));
3245        }
3246
3247        Ops.push_back(SetCC->getOperand(2));
3248        CombineTo(SetCC, DAG.getNode(ISD::SETCC, N->getDebugLoc(),
3249                                     SetCC->getValueType(0),
3250                                     &Ops[0], Ops.size()));
3251      }
3252
3253      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3254    }
3255  }
3256
3257  // fold (zext (zextload x)) -> (zext (truncate (zextload x)))
3258  // fold (zext ( extload x)) -> (zext (truncate (zextload x)))
3259  if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
3260      ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
3261    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3262    MVT EVT = LN0->getMemoryVT();
3263    if ((!LegalOperations && !LN0->isVolatile()) ||
3264        TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT)) {
3265      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
3266                                       LN0->getChain(),
3267                                       LN0->getBasePtr(), LN0->getSrcValue(),
3268                                       LN0->getSrcValueOffset(), EVT,
3269                                       LN0->isVolatile(), LN0->getAlignment());
3270      CombineTo(N, ExtLoad);
3271      CombineTo(N0.getNode(),
3272                DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), N0.getValueType(),
3273                            ExtLoad),
3274                ExtLoad.getValue(1));
3275      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3276    }
3277  }
3278
3279  // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
3280  if (N0.getOpcode() == ISD::SETCC) {
3281    SDValue SCC =
3282      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
3283                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
3284                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
3285    if (SCC.getNode()) return SCC;
3286  }
3287
3288  return SDValue();
3289}
3290
3291SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
3292  SDValue N0 = N->getOperand(0);
3293  MVT VT = N->getValueType(0);
3294
3295  // fold (aext c1) -> c1
3296  if (isa<ConstantSDNode>(N0))
3297    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, N0);
3298  // fold (aext (aext x)) -> (aext x)
3299  // fold (aext (zext x)) -> (zext x)
3300  // fold (aext (sext x)) -> (sext x)
3301  if (N0.getOpcode() == ISD::ANY_EXTEND  ||
3302      N0.getOpcode() == ISD::ZERO_EXTEND ||
3303      N0.getOpcode() == ISD::SIGN_EXTEND)
3304    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, N0.getOperand(0));
3305
3306  // fold (aext (truncate (load x))) -> (aext (smaller load x))
3307  // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
3308  if (N0.getOpcode() == ISD::TRUNCATE) {
3309    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
3310    if (NarrowLoad.getNode()) {
3311      if (NarrowLoad.getNode() != N0.getNode())
3312        CombineTo(N0.getNode(), NarrowLoad);
3313      return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, NarrowLoad);
3314    }
3315  }
3316
3317  // fold (aext (truncate x))
3318  if (N0.getOpcode() == ISD::TRUNCATE) {
3319    SDValue TruncOp = N0.getOperand(0);
3320    if (TruncOp.getValueType() == VT)
3321      return TruncOp; // x iff x size == zext size.
3322    if (TruncOp.getValueType().bitsGT(VT))
3323      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, TruncOp);
3324    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, TruncOp);
3325  }
3326
3327  // fold (aext (and (trunc x), cst)) -> (and x, cst).
3328  if (N0.getOpcode() == ISD::AND &&
3329      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
3330      N0.getOperand(1).getOpcode() == ISD::Constant) {
3331    SDValue X = N0.getOperand(0).getOperand(0);
3332    if (X.getValueType().bitsLT(VT)) {
3333      X = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, X);
3334    } else if (X.getValueType().bitsGT(VT)) {
3335      X = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, X);
3336    }
3337    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
3338    Mask.zext(VT.getSizeInBits());
3339    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3340                       X, DAG.getConstant(Mask, VT));
3341  }
3342
3343  // fold (aext (load x)) -> (aext (truncate (extload x)))
3344  if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() &&
3345      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3346       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
3347    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3348    SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
3349                                     LN0->getChain(),
3350                                     LN0->getBasePtr(), LN0->getSrcValue(),
3351                                     LN0->getSrcValueOffset(),
3352                                     N0.getValueType(),
3353                                     LN0->isVolatile(), LN0->getAlignment());
3354    CombineTo(N, ExtLoad);
3355    // Redirect any chain users to the new load.
3356    DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1),
3357                                  SDValue(ExtLoad.getNode(), 1));
3358    // If any node needs the original loaded value, recompute it.
3359    if (!LN0->use_empty())
3360      CombineTo(LN0, DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3361                                 N0.getValueType(), ExtLoad),
3362                ExtLoad.getValue(1));
3363    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3364  }
3365
3366  // fold (aext (zextload x)) -> (aext (truncate (zextload x)))
3367  // fold (aext (sextload x)) -> (aext (truncate (sextload x)))
3368  // fold (aext ( extload x)) -> (aext (truncate (extload  x)))
3369  if (N0.getOpcode() == ISD::LOAD &&
3370      !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
3371      N0.hasOneUse()) {
3372    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3373    MVT EVT = LN0->getMemoryVT();
3374    SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), N->getDebugLoc(),
3375                                     VT, LN0->getChain(), LN0->getBasePtr(),
3376                                     LN0->getSrcValue(),
3377                                     LN0->getSrcValueOffset(), EVT,
3378                                     LN0->isVolatile(), LN0->getAlignment());
3379    CombineTo(N, ExtLoad);
3380    CombineTo(N0.getNode(),
3381              DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3382                          N0.getValueType(), ExtLoad),
3383              ExtLoad.getValue(1));
3384    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3385  }
3386
3387  // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
3388  if (N0.getOpcode() == ISD::SETCC) {
3389    SDValue SCC =
3390      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
3391                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
3392                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
3393    if (SCC.getNode())
3394      return SCC;
3395  }
3396
3397  return SDValue();
3398}
3399
3400/// GetDemandedBits - See if the specified operand can be simplified with the
3401/// knowledge that only the bits specified by Mask are used.  If so, return the
3402/// simpler operand, otherwise return a null SDValue.
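/// For example, if only the low 8 bits of (or (shl x, 8), y) are demanded,
/// the shl contributes nothing to those bits and the value simplifies to y.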
3403SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
3404  switch (V.getOpcode()) {
3405  default: break;
3406  case ISD::OR:
3407  case ISD::XOR:
3408    // If the LHS or RHS don't contribute bits to the or, drop them.
3409    if (DAG.MaskedValueIsZero(V.getOperand(0), Mask))
3410      return V.getOperand(1);
3411    if (DAG.MaskedValueIsZero(V.getOperand(1), Mask))
3412      return V.getOperand(0);
3413    break;
3414  case ISD::SRL:
3415    // Only look at single-use SRLs.
3416    if (!V.getNode()->hasOneUse())
3417      break;
3418    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3419      // See if we can recursively simplify the LHS.
3420      unsigned Amt = RHSC->getZExtValue();
3421
3422      // Watch out for shift count overflow though.
3423      if (Amt >= Mask.getBitWidth()) break;
3424      APInt NewMask = Mask << Amt;
3425      SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask);
3426      if (SimplifyLHS.getNode())
3427        return DAG.getNode(ISD::SRL, V.getDebugLoc(), V.getValueType(),
3428                           SimplifyLHS, V.getOperand(1));
3429    }
3430  }
3431  return SDValue();
3432}
3433
3434/// ReduceLoadWidth - If the result of a wider load is shifted right by N
3435/// bits and then truncated to a narrower type, where N is a multiple of
3436/// the number of bits of the narrower type, transform it to a narrower load
3437/// from address + N / num of bits of new type. If the result is to be
3438/// extended, also fold the extension to form an extending load.
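/// For example, (truncate (srl (i32 load p), 16)) to i16 can be replaced by an
/// i16 load from p+2 on a little-endian target (from p on a big-endian one).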
3439SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
3440  unsigned Opc = N->getOpcode();
3441  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
3442  SDValue N0 = N->getOperand(0);
3443  MVT VT = N->getValueType(0);
3444  MVT EVT = VT;
3445
3446  // This transformation isn't valid for vector loads.
3447  if (VT.isVector())
3448    return SDValue();
3449
3450  // Special case: SIGN_EXTEND_INREG is basically truncating to EVT then
3451  // sign extending back to VT.
3452  if (Opc == ISD::SIGN_EXTEND_INREG) {
3453    ExtType = ISD::SEXTLOAD;
3454    EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
3455    if (LegalOperations && !TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))
3456      return SDValue();
3457  }
3458
3459  unsigned EVTBits = EVT.getSizeInBits();
3460  unsigned ShAmt = 0;
3461  if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
3462    if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3463      ShAmt = N01->getZExtValue();
3464      // Is the shift amount a multiple of the size of EVT?
3465      if ((ShAmt & (EVTBits-1)) == 0) {
3466        N0 = N0.getOperand(0);
3467        if (N0.getValueType().getSizeInBits() <= EVTBits)
3468          return SDValue();
3469      }
3470    }
3471  }
3472
3473  // Do not generate loads of non-round integer types since these can
3474  // be expensive (and would be wrong if the type is not byte sized).
3475  if (isa<LoadSDNode>(N0) && N0.hasOneUse() && EVT.isRound() &&
3476      cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() > EVTBits &&
3477      // Do not change the width of a volatile load.
3478      !cast<LoadSDNode>(N0)->isVolatile()) {
3479    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3480    MVT PtrType = N0.getOperand(1).getValueType();
3481
3482    // For big endian targets, we need to adjust the offset to the pointer to
3483    // load the correct bytes.
3484    if (TLI.isBigEndian()) {
3485      unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
3486      unsigned EVTStoreBits = EVT.getStoreSizeInBits();
3487      ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
3488    }
3489
3490    uint64_t PtrOff =  ShAmt / 8;
3491    unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
3492    SDValue NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(),
3493                                 PtrType, LN0->getBasePtr(),
3494                                 DAG.getConstant(PtrOff, PtrType));
3495    AddToWorkList(NewPtr.getNode());
3496
3497    SDValue Load = (ExtType == ISD::NON_EXTLOAD)
3498      ? DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
3499                    LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
3500                    LN0->isVolatile(), NewAlign)
3501      : DAG.getExtLoad(ExtType, N0.getDebugLoc(), VT, LN0->getChain(), NewPtr,
3502                       LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
3503                       EVT, LN0->isVolatile(), NewAlign);
3504
3505    // Replace the old load's chain with the new load's chain.
3506    WorkListRemover DeadNodes(*this);
3507    DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1),
3508                                  &DeadNodes);
3509
3510    // Return the new loaded value.
3511    return Load;
3512  }
3513
3514  return SDValue();
3515}
3516
3517SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
3518  SDValue N0 = N->getOperand(0);
3519  SDValue N1 = N->getOperand(1);
3520  MVT VT = N->getValueType(0);
3521  MVT EVT = cast<VTSDNode>(N1)->getVT();
3522  unsigned VTBits = VT.getSizeInBits();
3523  unsigned EVTBits = EVT.getSizeInBits();
3524
3525  // fold (sext_in_reg c1) -> c1
3526  if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF)
3527    return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, N0, N1);
3528
3529  // If the input is already sign extended, just drop the extension.
3530  if (DAG.ComputeNumSignBits(N0) >= VT.getSizeInBits()-EVTBits+1)
3531    return N0;
3532
3533  // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
3534  if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
3535      EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) {
3536    return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
3537                       N0.getOperand(0), N1);
3538  }
3539
3540  // fold (sext_in_reg (sext x)) -> (sext x)
3541  // fold (sext_in_reg (aext x)) -> (sext x)
3542  // if x is small enough.
3543  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
3544    SDValue N00 = N0.getOperand(0);
3545    if (N00.getValueType().getSizeInBits() < EVTBits)
3546      return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N00, N1);
3547  }
3548
3549  // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
3550  if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits)))
3551    return DAG.getZeroExtendInReg(N0, N->getDebugLoc(), EVT);
3552
3553  // fold operands of sext_in_reg based on knowledge that the top bits are not
3554  // demanded.
3555  if (SimplifyDemandedBits(SDValue(N, 0)))
3556    return SDValue(N, 0);
3557
3558  // fold (sext_in_reg (load x)) -> (smaller sextload x)
3559  // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
3560  SDValue NarrowLoad = ReduceLoadWidth(N);
3561  if (NarrowLoad.getNode())
3562    return NarrowLoad;
3563
3564  // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
3565  // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
3566  // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
3567  if (N0.getOpcode() == ISD::SRL) {
3568    if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
3569      if (ShAmt->getZExtValue()+EVTBits <= VT.getSizeInBits()) {
3570        // We can turn this into an SRA iff the input to the SRL is already sign
3571        // extended enough.
3572        unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
3573        if (VT.getSizeInBits()-(ShAmt->getZExtValue()+EVTBits) < InSignBits)
3574          return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT,
3575                             N0.getOperand(0), N0.getOperand(1));
3576      }
3577  }
3578
3579  // fold (sext_inreg (extload x)) -> (sextload x)
3580  if (ISD::isEXTLoad(N0.getNode()) &&
3581      ISD::isUNINDEXEDLoad(N0.getNode()) &&
3582      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
3583      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3584       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
3585    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3586    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
3587                                     LN0->getChain(),
3588                                     LN0->getBasePtr(), LN0->getSrcValue(),
3589                                     LN0->getSrcValueOffset(), EVT,
3590                                     LN0->isVolatile(), LN0->getAlignment());
3591    CombineTo(N, ExtLoad);
3592    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
3593    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3594  }
3595  // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
3596  if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
3597      N0.hasOneUse() &&
3598      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
3599      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3600       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
3601    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3602    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
3603                                     LN0->getChain(),
3604                                     LN0->getBasePtr(), LN0->getSrcValue(),
3605                                     LN0->getSrcValueOffset(), EVT,
3606                                     LN0->isVolatile(), LN0->getAlignment());
3607    CombineTo(N, ExtLoad);
3608    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
3609    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3610  }
3611  return SDValue();
3612}
3613
3614SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
3615  SDValue N0 = N->getOperand(0);
3616  MVT VT = N->getValueType(0);
3617
3618  // noop truncate
3619  if (N0.getValueType() == N->getValueType(0))
3620    return N0;
3621  // fold (truncate c1) -> c1
3622  if (isa<ConstantSDNode>(N0))
3623    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0);
3624  // fold (truncate (truncate x)) -> (truncate x)
3625  if (N0.getOpcode() == ISD::TRUNCATE)
3626    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
3627  // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
3628  if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::SIGN_EXTEND||
3629      N0.getOpcode() == ISD::ANY_EXTEND) {
3630    if (N0.getOperand(0).getValueType().bitsLT(VT))
3631      // if the source is smaller than the dest, we still need an extend
3632      return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
3633                         N0.getOperand(0));
3634    else if (N0.getOperand(0).getValueType().bitsGT(VT))
3635      // if the source is larger than the dest, then we just need the truncate
3636      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
3637    else
3638      // if the source and dest are the same type, we can drop both the extend
3639      // and the truncate
3640      return N0.getOperand(0);
3641  }
3642
3643  // See if we can simplify the input to this truncate through knowledge that
3644  // only the low bits are being used.  For example "trunc (or (shl x, 8), y)"
3645  // -> trunc y
3646  SDValue Shorter =
3647    GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
3648                                             VT.getSizeInBits()));
3649  if (Shorter.getNode())
3650    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Shorter);
3651
3652  // fold (truncate (load x)) -> (smaller load x)
3653  // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
3654  return ReduceLoadWidth(N);
3655}
3656
3657static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
3658  SDValue Elt = N->getOperand(i);
3659  if (Elt.getOpcode() != ISD::MERGE_VALUES)
3660    return Elt.getNode();
3661  return Elt.getOperand(Elt.getResNo()).getNode();
3662}
3663
3664/// CombineConsecutiveLoads - build_pair (load, load) -> load
3665/// if load locations are consecutive.
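/// For example, an i64 build_pair of two adjacent i32 loads can become one i64
/// load, provided the wider load's ABI alignment requirement does not exceed
/// the alignment the first load already guarantees.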
3666SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT VT) {
3667  assert(N->getOpcode() == ISD::BUILD_PAIR);
3668
3669  SDNode *LD1 = getBuildPairElt(N, 0);
3670  if (!ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse())
3671    return SDValue();
3672  MVT LD1VT = LD1->getValueType(0);
3673  SDNode *LD2 = getBuildPairElt(N, 1);
3674  const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
3675
3676  if (ISD::isNON_EXTLoad(LD2) &&
3677      LD2->hasOneUse() &&
3678      // If both are volatile this would reduce the number of volatile loads.
3679      // If one is volatile it might be ok, but be conservative and bail out.
3680      !cast<LoadSDNode>(LD1)->isVolatile() &&
3681      !cast<LoadSDNode>(LD2)->isVolatile() &&
3682      TLI.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1, MFI)) {
3683    LoadSDNode *LD = cast<LoadSDNode>(LD1);
3684    unsigned Align = LD->getAlignment();
3685    unsigned NewAlign = TLI.getTargetData()->
3686      getABITypeAlignment(VT.getTypeForMVT());
3687
3688    if (NewAlign <= Align &&
3689        (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
3690      return DAG.getLoad(VT, N->getDebugLoc(), LD->getChain(), LD->getBasePtr(),
3691                         LD->getSrcValue(), LD->getSrcValueOffset(),
3692                         false, Align);
3693  }
3694
3695  return SDValue();
3696}
3697
3698SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
3699  SDValue N0 = N->getOperand(0);
3700  MVT VT = N->getValueType(0);
3701
3702  // If the input is a BUILD_VECTOR with all constant elements, fold this now.
3703  // Only do this before legalize, since afterward the target may depend
3704  // on the bitconvert.
3705  // First check to see if this is all constant.
3706  if (!LegalTypes &&
3707      N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
3708      VT.isVector()) {
3709    bool isSimple = true;
3710    for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i)
3711      if (N0.getOperand(i).getOpcode() != ISD::UNDEF &&
3712          N0.getOperand(i).getOpcode() != ISD::Constant &&
3713          N0.getOperand(i).getOpcode() != ISD::ConstantFP) {
3714        isSimple = false;
3715        break;
3716      }
3717
3718    MVT DestEltVT = N->getValueType(0).getVectorElementType();
3719    assert(!DestEltVT.isVector() &&
3720           "Element type of vector ValueType must not be vector!");
3721    if (isSimple)
3722      return ConstantFoldBIT_CONVERTofBUILD_VECTOR(N0.getNode(), DestEltVT);
3723  }
3724
3725  // If the input is a constant, let getNode fold it.
3726  if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
3727    SDValue Res = DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, N0);
3728    if (Res.getNode() != N) return Res;
3729  }
3730
3731  // (conv (conv x, t1), t2) -> (conv x, t2)
3732  if (N0.getOpcode() == ISD::BIT_CONVERT)
3733    return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT,
3734                       N0.getOperand(0));
3735
3736  // fold (conv (load x)) -> (load (conv*)x)
3737  // If the resultant load doesn't need a higher alignment than the original!
3738  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
3739      // Do not change the width of a volatile load.
3740      !cast<LoadSDNode>(N0)->isVolatile() &&
3741      (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) {
3742    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3743    unsigned Align = TLI.getTargetData()->
3744      getABITypeAlignment(VT.getTypeForMVT());
3745    unsigned OrigAlign = LN0->getAlignment();
3746
3747    if (Align <= OrigAlign) {
3748      SDValue Load = DAG.getLoad(VT, N->getDebugLoc(), LN0->getChain(),
3749                                 LN0->getBasePtr(),
3750                                 LN0->getSrcValue(), LN0->getSrcValueOffset(),
3751                                 LN0->isVolatile(), OrigAlign);
3752      AddToWorkList(N);
3753      CombineTo(N0.getNode(),
3754                DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
3755                            N0.getValueType(), Load),
3756                Load.getValue(1));
3757      return Load;
3758    }
3759  }
3760
3761  // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
3762  // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
3763  // This often reduces constant pool loads.
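  // e.g. for f32: (bitconvert (fneg x)) -> (xor (bitconvert x), 0x80000000)
  // and (bitconvert (fabs x)) -> (and (bitconvert x), 0x7fffffff).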
3764  if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) &&
3765      N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) {
3766    SDValue NewConv = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(), VT,
3767                                  N0.getOperand(0));
3768    AddToWorkList(NewConv.getNode());
3769
3770    APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
3771    if (N0.getOpcode() == ISD::FNEG)
3772      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
3773                         NewConv, DAG.getConstant(SignBit, VT));
3774    assert(N0.getOpcode() == ISD::FABS);
3775    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3776                       NewConv, DAG.getConstant(~SignBit, VT));
3777  }
3778
3779  // fold (bitconvert (fcopysign cst, x)) ->
3780  //         (or (and (bitconvert x), sign), (and cst, (not sign)))
3781  // Note that we don't handle (copysign x, cst) because this can always be
3782  // folded to an fneg or fabs.
3783  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
3784      isa<ConstantFPSDNode>(N0.getOperand(0)) &&
3785      VT.isInteger() && !VT.isVector()) {
3786    unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
3787    MVT IntXVT = MVT::getIntegerVT(OrigXWidth);
3788    if (TLI.isTypeLegal(IntXVT) || !LegalTypes) {
3789      SDValue X = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
3790                              IntXVT, N0.getOperand(1));
3791      AddToWorkList(X.getNode());
3792
3793      // If X has a different width than the result/lhs, sext it or truncate it.
3794      unsigned VTWidth = VT.getSizeInBits();
3795      if (OrigXWidth < VTWidth) {
3796        X = DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, X);
3797        AddToWorkList(X.getNode());
3798      } else if (OrigXWidth > VTWidth) {
3799        // To get the sign bit in the right place, we have to shift it right
3800        // before truncating.
3801        X = DAG.getNode(ISD::SRL, X.getDebugLoc(),
3802                        X.getValueType(), X,
3803                        DAG.getConstant(OrigXWidth-VTWidth, X.getValueType()));
3804        AddToWorkList(X.getNode());
3805        X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
3806        AddToWorkList(X.getNode());
3807      }
3808
3809      APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
3810      X = DAG.getNode(ISD::AND, X.getDebugLoc(), VT,
3811                      X, DAG.getConstant(SignBit, VT));
3812      AddToWorkList(X.getNode());
3813
3814      SDValue Cst = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
3815                                VT, N0.getOperand(0));
3816      Cst = DAG.getNode(ISD::AND, Cst.getDebugLoc(), VT,
3817                        Cst, DAG.getConstant(~SignBit, VT));
3818      AddToWorkList(Cst.getNode());
3819
3820      return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, X, Cst);
3821    }
3822  }
3823
3824  // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
3825  if (N0.getOpcode() == ISD::BUILD_PAIR) {
3826    SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
3827    if (CombineLD.getNode())
3828      return CombineLD;
3829  }
3830
3831  return SDValue();
3832}
3833
3834SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
3835  MVT VT = N->getValueType(0);
3836  return CombineConsecutiveLoads(N, VT);
3837}
3838
3839/// ConstantFoldBIT_CONVERTofBUILD_VECTOR - We know that BV is a build_vector
3840/// node with Constant, ConstantFP or Undef operands.  DstEltVT indicates the
3841/// destination element value type.
3842SDValue DAGCombiner::
3843ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
3844  MVT SrcEltVT = BV->getOperand(0).getValueType();
3845
3846  // If this is already the right type, we're done.
3847  if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
3848
3849  unsigned SrcBitSize = SrcEltVT.getSizeInBits();
3850  unsigned DstBitSize = DstEltVT.getSizeInBits();
3851
3852  // If this is a conversion of N elements of one type to N elements of another
3853  // type, convert each element.  This handles FP<->INT cases.
3854  if (SrcBitSize == DstBitSize) {
3855    SmallVector<SDValue, 8> Ops;
3856    for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3857      Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, BV->getDebugLoc(),
3858                                DstEltVT, BV->getOperand(i)));
3859      AddToWorkList(Ops.back().getNode());
3860    }
3861    MVT VT = MVT::getVectorVT(DstEltVT,
3862                              BV->getValueType(0).getVectorNumElements());
3863    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
3864                       &Ops[0], Ops.size());
3865  }
3866
3867  // Otherwise, we're growing or shrinking the elements.  To avoid having to
3868  // handle annoying details of growing/shrinking FP values, we convert them to
3869  // int first.
3870  if (SrcEltVT.isFloatingPoint()) {
3871    // Convert the input float vector to an int vector where the elements are
3872    // the same size.
3873    assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
3874    MVT IntVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits());
3875    BV = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, IntVT).getNode();
3876    SrcEltVT = IntVT;
3877  }
3878
3879  // Now we know the input is an integer vector.  If the output is a FP type,
3880  // convert to integer first, then to FP of the right size.
3881  if (DstEltVT.isFloatingPoint()) {
3882    assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
3883    MVT TmpVT = MVT::getIntegerVT(DstEltVT.getSizeInBits());
3884    SDNode *Tmp = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, TmpVT).getNode();
3885
3886    // Next, convert to FP elements of the same size.
3887    return ConstantFoldBIT_CONVERTofBUILD_VECTOR(Tmp, DstEltVT);
3888  }
3889
3890  // Okay, we know the src/dst types are both integers of differing sizes.
3891  // Handle the growing case first.
3892  assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
3893  if (SrcBitSize < DstBitSize) {
3894    unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
3895
3896    SmallVector<SDValue, 8> Ops;
3897    for (unsigned i = 0, e = BV->getNumOperands(); i != e;
3898         i += NumInputsPerOutput) {
3899      bool isLE = TLI.isLittleEndian();
3900      APInt NewBits = APInt(DstBitSize, 0);
3901      bool EltIsUndef = true;
3902      for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
3903        // Shift the previously computed bits over.
3904        NewBits <<= SrcBitSize;
3905        SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
3906        if (Op.getOpcode() == ISD::UNDEF) continue;
3907        EltIsUndef = false;
3908
3909        NewBits |=
3910          APInt(cast<ConstantSDNode>(Op)->getAPIntValue()).zext(DstBitSize);
3911      }
3912
3913      if (EltIsUndef)
3914        Ops.push_back(DAG.getNode(ISD::UNDEF, BV->getDebugLoc(), DstEltVT));
3915      else
3916        Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
3917    }
3918
3919    MVT VT = MVT::getVectorVT(DstEltVT, Ops.size());
3920    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
3921                       &Ops[0], Ops.size());
3922  }
3923
3924  // Finally, this must be the case where we are shrinking elements: each input
3925  // turns into multiple outputs.
3926  bool isS2V = ISD::isScalarToVector(BV);
3927  unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
3928  MVT VT = MVT::getVectorVT(DstEltVT, NumOutputsPerInput*BV->getNumOperands());
3929  SmallVector<SDValue, 8> Ops;
3930
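  // For example (little-endian), converting (v1i64 build_vector
  // 0x5566778811223344) to v2i32 emits the low piece first, giving
  // (v2i32 build_vector 0x11223344, 0x55667788); big-endian targets reverse
  // the pieces of each element below.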
3931  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3932    if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
3933      for (unsigned j = 0; j != NumOutputsPerInput; ++j)
3934        Ops.push_back(DAG.getNode(ISD::UNDEF, BV->getDebugLoc(), DstEltVT));
3935      continue;
3936    }
3937
3938    APInt OpVal = cast<ConstantSDNode>(BV->getOperand(i))->getAPIntValue();
3939
3940    for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
3941      APInt ThisVal = APInt(OpVal).trunc(DstBitSize);
3942      Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
3943      if (isS2V && i == 0 && j == 0 && APInt(ThisVal).zext(SrcBitSize) == OpVal)
3944        // Simply turn this into a SCALAR_TO_VECTOR of the new type.
3945        return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
3946                           Ops[0]);
3947      OpVal = OpVal.lshr(DstBitSize);
3948    }
3949
3950    // For big endian targets, swap the order of the pieces of each element.
3951    if (TLI.isBigEndian())
3952      std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
3953  }
3954
3955  return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
3956                     &Ops[0], Ops.size());
3957}
3958
3959SDValue DAGCombiner::visitFADD(SDNode *N) {
3960  SDValue N0 = N->getOperand(0);
3961  SDValue N1 = N->getOperand(1);
3962  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
3963  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3964  MVT VT = N->getValueType(0);
3965
3966  // fold vector ops
3967  if (VT.isVector()) {
3968    SDValue FoldedVOp = SimplifyVBinOp(N);
3969    if (FoldedVOp.getNode()) return FoldedVOp;
3970  }
3971
3972  // fold (fadd c1, c2) -> c1+c2
3973  if (N0CFP && N1CFP && VT != MVT::ppcf128)
3974    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N1);
3975  // canonicalize constant to RHS
3976  if (N0CFP && !N1CFP)
3977    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N0);
3978  // fold (fadd A, 0) -> A
3979  if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
3980    return N0;
3981  // fold (fadd A, (fneg B)) -> (fsub A, B)
3982  if (isNegatibleForFree(N1, LegalOperations) == 2)
3983    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0,
3984                       GetNegatedExpression(N1, DAG, LegalOperations));
3985  // fold (fadd (fneg A), B) -> (fsub B, A)
3986  if (isNegatibleForFree(N0, LegalOperations) == 2)
3987    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N1,
3988                       GetNegatedExpression(N0, DAG, LegalOperations));
3989
3990  // If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
3991  if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FADD &&
3992      N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
3993    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0.getOperand(0),
3994                       DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
3995                                   N0.getOperand(1), N1));
3996
3997  return SDValue();
3998}
3999
4000SDValue DAGCombiner::visitFSUB(SDNode *N) {
4001  SDValue N0 = N->getOperand(0);
4002  SDValue N1 = N->getOperand(1);
4003  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4004  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4005  MVT VT = N->getValueType(0);
4006
4007  // fold vector ops
4008  if (VT.isVector()) {
4009    SDValue FoldedVOp = SimplifyVBinOp(N);
4010    if (FoldedVOp.getNode()) return FoldedVOp;
4011  }
4012
4013  // fold (fsub c1, c2) -> c1-c2
4014  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4015    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1);
4016  // fold (fsub A, 0) -> A
4017  if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
4018    return N0;
4019  // fold (fsub 0, B) -> -B
4020  if (UnsafeFPMath && N0CFP && N0CFP->getValueAPF().isZero()) {
4021    if (isNegatibleForFree(N1, LegalOperations))
4022      return GetNegatedExpression(N1, DAG, LegalOperations);
4023    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
4024      return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N1);
4025  }
4026  // fold (fsub A, (fneg B)) -> (fadd A, B)
4027  if (isNegatibleForFree(N1, LegalOperations))
4028    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0,
4029                       GetNegatedExpression(N1, DAG, LegalOperations));
4030
4031  return SDValue();
4032}
4033
4034SDValue DAGCombiner::visitFMUL(SDNode *N) {
4035  SDValue N0 = N->getOperand(0);
4036  SDValue N1 = N->getOperand(1);
4037  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4038  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4039  MVT VT = N->getValueType(0);
4040
4041  // fold vector ops
4042  if (VT.isVector()) {
4043    SDValue FoldedVOp = SimplifyVBinOp(N);
4044    if (FoldedVOp.getNode()) return FoldedVOp;
4045  }
4046
4047  // fold (fmul c1, c2) -> c1*c2
4048  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4049    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0, N1);
4050  // canonicalize constant to RHS
4051  if (N0CFP && !N1CFP)
4052    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N1, N0);
4053  // fold (fmul A, 0) -> 0
4054  if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
4055    return N1;
4056  // fold (fmul X, 2.0) -> (fadd X, X)
4057  if (N1CFP && N1CFP->isExactlyValue(+2.0))
4058    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N0);
4059  // fold (fmul X, -1.0) -> (fneg X)
4060  if (N1CFP && N1CFP->isExactlyValue(-1.0))
4061    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
4062      return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N0);
4063
4064  // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
4065  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) {
4066    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) {
4067      // Both can be negated for free, check to see if at least one is cheaper
4068      // negated.
4069      if (LHSNeg == 2 || RHSNeg == 2)
4070        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
4071                           GetNegatedExpression(N0, DAG, LegalOperations),
4072                           GetNegatedExpression(N1, DAG, LegalOperations));
4073    }
4074  }
4075
4076  // If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
4077  if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FMUL &&
4078      N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
4079    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0.getOperand(0),
4080                       DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0.getOperand(1), N1));
4081
4082  return SDValue();
4083}
4084
4085SDValue DAGCombiner::visitFDIV(SDNode *N) {
4086  SDValue N0 = N->getOperand(0);
4087  SDValue N1 = N->getOperand(1);
4088  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4089  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4090  MVT VT = N->getValueType(0);
4091
4092  // fold vector ops
4093  if (VT.isVector()) {
4094    SDValue FoldedVOp = SimplifyVBinOp(N);
4095    if (FoldedVOp.getNode()) return FoldedVOp;
4096  }
4097
4098  // fold (fdiv c1, c2) -> c1/c2
4099  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4100    return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, N0, N1);
4101
4102
4103  // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
4104  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) {
4105    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) {
4106      // Both can be negated for free, check to see if at least one is cheaper
4107      // negated.
4108      if (LHSNeg == 2 || RHSNeg == 2)
4109        return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT,
4110                           GetNegatedExpression(N0, DAG, LegalOperations),
4111                           GetNegatedExpression(N1, DAG, LegalOperations));
4112    }
4113  }
4114
4115  return SDValue();
4116}
4117
4118SDValue DAGCombiner::visitFREM(SDNode *N) {
4119  SDValue N0 = N->getOperand(0);
4120  SDValue N1 = N->getOperand(1);
4121  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4122  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4123  MVT VT = N->getValueType(0);
4124
4125  // fold (frem c1, c2) -> fmod(c1,c2)
4126  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4127    return DAG.getNode(ISD::FREM, N->getDebugLoc(), VT, N0, N1);
4128
4129  return SDValue();
4130}
4131
4132SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
4133  SDValue N0 = N->getOperand(0);
4134  SDValue N1 = N->getOperand(1);
4135  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4136  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4137  MVT VT = N->getValueType(0);
4138
4139  if (N0CFP && N1CFP && VT != MVT::ppcf128)  // Constant fold
4140    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, N0, N1);
4141
4142  if (N1CFP) {
4143    const APFloat& V = N1CFP->getValueAPF();
4144    // copysign(x, c1) -> fabs(x)       iff ispos(c1)
4145    // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
4146    if (!V.isNegative()) {
4147      if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
4148        return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
4149    } else {
4150      if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
4151        return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT,
4152                           DAG.getNode(ISD::FABS, N0.getDebugLoc(), VT, N0));
4153    }
4154  }
4155
4156  // copysign(fabs(x), y) -> copysign(x, y)
4157  // copysign(fneg(x), y) -> copysign(x, y)
4158  // copysign(copysign(x,z), y) -> copysign(x, y)
4159  if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
4160      N0.getOpcode() == ISD::FCOPYSIGN)
4161    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
4162                       N0.getOperand(0), N1);
4163
4164  // copysign(x, abs(y)) -> abs(x)
4165  if (N1.getOpcode() == ISD::FABS)
4166    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
4167
4168  // copysign(x, copysign(y,z)) -> copysign(x, z)
4169  if (N1.getOpcode() == ISD::FCOPYSIGN)
4170    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
4171                       N0, N1.getOperand(1));
4172
4173  // copysign(x, fp_extend(y)) -> copysign(x, y)
4174  // copysign(x, fp_round(y)) -> copysign(x, y)
4175  if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND)
4176    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
4177                       N0, N1.getOperand(0));
4178
4179  return SDValue();
4180}
4181
4182SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
4183  SDValue N0 = N->getOperand(0);
4184  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
4185  MVT VT = N->getValueType(0);
4186  MVT OpVT = N0.getValueType();
4187
4188  // fold (sint_to_fp c1) -> c1fp
4189  if (N0C && OpVT != MVT::ppcf128)
4190    return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
4191
4192  // If the input is a legal type, and SINT_TO_FP is not legal on this target,
4193  // but UINT_TO_FP is legal on this target, try to convert.
4194  if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) &&
4195      TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) {
4196    // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
4197    if (DAG.SignBitIsZero(N0))
4198      return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
4199  }
4200
4201  return SDValue();
4202}
4203
4204SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
4205  SDValue N0 = N->getOperand(0);
4206  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
4207  MVT VT = N->getValueType(0);
4208  MVT OpVT = N0.getValueType();
4209
4210  // fold (uint_to_fp c1) -> c1fp
4211  if (N0C && OpVT != MVT::ppcf128)
4212    return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
4213
4214  // If the input is a legal type, and UINT_TO_FP is not legal on this target,
4215  // but SINT_TO_FP is legal on this target, try to convert.
4216  if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) &&
4217      TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) {
4218    // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
4219    if (DAG.SignBitIsZero(N0))
4220      return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
4221  }
4222
4223  return SDValue();
4224}
4225
4226SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
4227  SDValue N0 = N->getOperand(0);
4228  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4229  MVT VT = N->getValueType(0);
4230
4231  // fold (fp_to_sint c1fp) -> c1
4232  if (N0CFP)
4233    return DAG.getNode(ISD::FP_TO_SINT, N->getDebugLoc(), VT, N0);
4234
4235  return SDValue();
4236}
4237
4238SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
4239  SDValue N0 = N->getOperand(0);
4240  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4241  MVT VT = N->getValueType(0);
4242
4243  // fold (fp_to_uint c1fp) -> c1
4244  if (N0CFP && VT != MVT::ppcf128)
4245    return DAG.getNode(ISD::FP_TO_UINT, N->getDebugLoc(), VT, N0);
4246
4247  return SDValue();
4248}
4249
4250SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
4251  SDValue N0 = N->getOperand(0);
4252  SDValue N1 = N->getOperand(1);
4253  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4254  MVT VT = N->getValueType(0);
4255
4256  // fold (fp_round c1fp) -> c1fp
4257  if (N0CFP && N0.getValueType() != MVT::ppcf128)
4258    return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0, N1);
4259
4260  // fold (fp_round (fp_extend x)) -> x
4261  if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
4262    return N0.getOperand(0);
4263
4264  // fold (fp_round (fp_round x)) -> (fp_round x)
4265  if (N0.getOpcode() == ISD::FP_ROUND) {
4266    // This is a value-preserving truncation if both rounds are.
4267    bool IsTrunc = N->getConstantOperandVal(1) == 1 &&
4268                   N0.getNode()->getConstantOperandVal(1) == 1;
4269    return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0.getOperand(0),
4270                       DAG.getIntPtrConstant(IsTrunc));
4271  }
4272
4273  // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
4274  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
4275    SDValue Tmp = DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(), VT,
4276                              N0.getOperand(0), N1);
4277    AddToWorkList(Tmp.getNode());
4278    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
4279                       Tmp, N0.getOperand(1));
4280  }
4281
4282  return SDValue();
4283}
4284
4285SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
4286  SDValue N0 = N->getOperand(0);
4287  MVT VT = N->getValueType(0);
4288  MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
4289  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4290
4291  // fold (fp_round_inreg c1fp) -> c1fp
4292  if (N0CFP && (TLI.isTypeLegal(EVT) || !LegalTypes)) {
4293    SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), EVT);
4294    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, Round);
4295  }
4296
4297  return SDValue();
4298}
4299
4300SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
4301  SDValue N0 = N->getOperand(0);
4302  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4303  MVT VT = N->getValueType(0);
4304
4305  // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
4306  if (N->hasOneUse() &&
4307      N->use_begin()->getOpcode() == ISD::FP_ROUND)
4308    return SDValue();
4309
4310  // fold (fp_extend c1fp) -> c1fp
4311  if (N0CFP && VT != MVT::ppcf128)
4312    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, N0);
4313
4314  // Turn fp_extend(fp_round(X, 1)) -> X since the fp_round doesn't affect the
4315  // value of X.
4316  if (N0.getOpcode() == ISD::FP_ROUND
4317      && N0.getNode()->getConstantOperandVal(1) == 1) {
4318    SDValue In = N0.getOperand(0);
4319    if (In.getValueType() == VT) return In;
4320    if (VT.bitsLT(In.getValueType()))
4321      return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT,
4322                         In, N0.getOperand(1));
4323    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, In);
4324  }
4325
4326  // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
4327  if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() &&
4328      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
4329       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
4330    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4331    SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
4332                                     LN0->getChain(),
4333                                     LN0->getBasePtr(), LN0->getSrcValue(),
4334                                     LN0->getSrcValueOffset(),
4335                                     N0.getValueType(),
4336                                     LN0->isVolatile(), LN0->getAlignment());
4337    CombineTo(N, ExtLoad);
4338    CombineTo(N0.getNode(),
4339              DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(),
4340                          N0.getValueType(), ExtLoad, DAG.getIntPtrConstant(1)),
4341              ExtLoad.getValue(1));
4342    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4343  }
4344
4345  return SDValue();
4346}
4347
4348SDValue DAGCombiner::visitFNEG(SDNode *N) {
4349  SDValue N0 = N->getOperand(0);
4350
4351  if (isNegatibleForFree(N0, LegalOperations))
4352    return GetNegatedExpression(N0, DAG, LegalOperations);
4353
4354  // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
4355  // constant pool values.
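  // For example, an f64 fneg becomes an i64 xor with the sign-bit mask
  // 0x8000000000000000.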
4356  if (N0.getOpcode() == ISD::BIT_CONVERT && N0.getNode()->hasOneUse() &&
4357      N0.getOperand(0).getValueType().isInteger() &&
4358      !N0.getOperand(0).getValueType().isVector()) {
4359    SDValue Int = N0.getOperand(0);
4360    MVT IntVT = Int.getValueType();
4361    if (IntVT.isInteger() && !IntVT.isVector()) {
4362      Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int,
4363                        DAG.getConstant(IntVT.getIntegerVTSignBit(), IntVT));
4364      AddToWorkList(Int.getNode());
4365      return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
4366                         N->getValueType(0), Int);
4367    }
4368  }
4369
4370  return SDValue();
4371}
4372
4373SDValue DAGCombiner::visitFABS(SDNode *N) {
4374  SDValue N0 = N->getOperand(0);
4375  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4376  MVT VT = N->getValueType(0);
4377
4378  // fold (fabs c1) -> fabs(c1)
4379  if (N0CFP && VT != MVT::ppcf128)
4380    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
4381  // fold (fabs (fabs x)) -> (fabs x)
4382  if (N0.getOpcode() == ISD::FABS)
4383    return N->getOperand(0);
4384  // fold (fabs (fneg x)) -> (fabs x)
4385  // fold (fabs (fcopysign x, y)) -> (fabs x)
4386  if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
4387    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0.getOperand(0));
4388
4389  // Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
4390  // constant pool values.
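  // For example, an f64 fabs becomes an i64 and with ~sign, i.e.
  // 0x7FFFFFFFFFFFFFFF.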
4391  if (N0.getOpcode() == ISD::BIT_CONVERT && N0.getNode()->hasOneUse() &&
4392      N0.getOperand(0).getValueType().isInteger() &&
4393      !N0.getOperand(0).getValueType().isVector()) {
4394    SDValue Int = N0.getOperand(0);
4395    MVT IntVT = Int.getValueType();
4396    if (IntVT.isInteger() && !IntVT.isVector()) {
4397      Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int,
4398                        DAG.getConstant(~IntVT.getIntegerVTSignBit(), IntVT));
4399      AddToWorkList(Int.getNode());
4400      return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
4401                         N->getValueType(0), Int);
4402    }
4403  }
4404
4405  return SDValue();
4406}
4407
4408SDValue DAGCombiner::visitBRCOND(SDNode *N) {
4409  SDValue Chain = N->getOperand(0);
4410  SDValue N1 = N->getOperand(1);
4411  SDValue N2 = N->getOperand(2);
4412  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4413
4414  // never taken branch, fold to chain
4415  if (N1C && N1C->isNullValue())
4416    return Chain;
4417  // unconditional branch
4418  if (N1C && N1C->getAPIntValue() == 1)
4419    return DAG.getNode(ISD::BR, N->getDebugLoc(), MVT::Other, Chain, N2);
4420  // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
4421  // on the target.
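  // For example, (brcond (setcc X, Y, cc), dest) -> (br_cc cc, X, Y, dest).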
4422  if (N1.getOpcode() == ISD::SETCC &&
4423      TLI.isOperationLegalOrCustom(ISD::BR_CC, MVT::Other)) {
4424    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
4425                       Chain, N1.getOperand(2),
4426                       N1.getOperand(0), N1.getOperand(1), N2);
4427  }
4428
4429  return SDValue();
4430}
4431
4432// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
4433//
4434SDValue DAGCombiner::visitBR_CC(SDNode *N) {
4435  CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
4436  SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
4437
4438  // Use SimplifySetCC to simplify SETCC's.
4439  SDValue Simp = SimplifySetCC(TLI.getSetCCResultType(CondLHS.getValueType()),
4440                               CondLHS, CondRHS, CC->get(), false);
4441  if (Simp.getNode()) AddToWorkList(Simp.getNode());
4442
4443  ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(Simp.getNode());
4444
4445  // fold br_cc true, dest -> br dest (unconditional branch)
4446  if (SCCC && !SCCC->isNullValue())
4447    return DAG.getNode(ISD::BR, N->getDebugLoc(), MVT::Other,
4448                       N->getOperand(0), N->getOperand(4));
4449  // fold br_cc false, dest -> unconditional fall through
4450  if (SCCC && SCCC->isNullValue())
4451    return N->getOperand(0);
4452
4453  // fold to a simpler setcc
4454  if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
4455    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
4456                       N->getOperand(0), Simp.getOperand(2),
4457                       Simp.getOperand(0), Simp.getOperand(1),
4458                       N->getOperand(4));
4459
4460  return SDValue();
4461}
4462
4463/// CombineToPreIndexedLoadStore - Try turning a load / store into a
4464/// pre-indexed load / store when the base pointer is an add or subtract
4465/// and it has other uses besides the load / store. After the
4466/// transformation, the new indexed load / store has effectively folded
4467/// the add / subtract in and all of its other uses are redirected to the
4468/// new load / store.
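/// For example, with pre-increment addressing:
///   add = ADD base, offset
///   val = LOAD add            (plus other uses of add)
/// becomes a pre-indexed LOAD whose pointer result replaces the remaining
/// uses of the add.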
4469bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
4470  if (!LegalOperations)
4471    return false;
4472
4473  bool isLoad = true;
4474  SDValue Ptr;
4475  MVT VT;
4476  if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
4477    if (LD->isIndexed())
4478      return false;
4479    VT = LD->getMemoryVT();
4480    if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
4481        !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
4482      return false;
4483    Ptr = LD->getBasePtr();
4484  } else if (StoreSDNode *ST  = dyn_cast<StoreSDNode>(N)) {
4485    if (ST->isIndexed())
4486      return false;
4487    VT = ST->getMemoryVT();
4488    if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
4489        !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
4490      return false;
4491    Ptr = ST->getBasePtr();
4492    isLoad = false;
4493  } else {
4494    return false;
4495  }
4496
4497  // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
4498  // out.  There is no reason to make this a preinc/predec.
4499  if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
4500      Ptr.getNode()->hasOneUse())
4501    return false;
4502
4503  // Ask the target to do addressing mode selection.
4504  SDValue BasePtr;
4505  SDValue Offset;
4506  ISD::MemIndexedMode AM = ISD::UNINDEXED;
4507  if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
4508    return false;
4509  // Don't create an indexed load / store with zero offset.
4510  if (isa<ConstantSDNode>(Offset) &&
4511      cast<ConstantSDNode>(Offset)->isNullValue())
4512    return false;
4513
4514  // Try turning it into a pre-indexed load / store except when:
4515  // 1) The new base ptr is a frame index.
4516  // 2) If N is a store and the new base ptr is either the same as or is a
4517  //    predecessor of the value being stored.
4518  // 3) Another use of old base ptr is a predecessor of N. If ptr is folded
4519  //    that would create a cycle.
4520  // 4) All uses are load / store ops that use it as old base ptr.
4521
4522  // Check #1.  Preinc'ing a frame index would require copying the stack pointer
4523  // (plus the implicit offset) to a register to preinc anyway.
4524  if (isa<FrameIndexSDNode>(BasePtr))
4525    return false;
4526
4527  // Check #2.
4528  if (!isLoad) {
4529    SDValue Val = cast<StoreSDNode>(N)->getValue();
4530    if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode()))
4531      return false;
4532  }
4533
4534  // Now check for #3 and #4.
4535  bool RealUse = false;
4536  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
4537         E = Ptr.getNode()->use_end(); I != E; ++I) {
4538    SDNode *Use = *I;
4539    if (Use == N)
4540      continue;
4541    if (Use->isPredecessorOf(N))
4542      return false;
4543
4544    if (!((Use->getOpcode() == ISD::LOAD &&
4545           cast<LoadSDNode>(Use)->getBasePtr() == Ptr) ||
4546          (Use->getOpcode() == ISD::STORE &&
4547           cast<StoreSDNode>(Use)->getBasePtr() == Ptr)))
4548      RealUse = true;
4549  }
4550
4551  if (!RealUse)
4552    return false;
4553
4554  SDValue Result;
4555  if (isLoad)
4556    Result = DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
4557                                BasePtr, Offset, AM);
4558  else
4559    Result = DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
4560                                 BasePtr, Offset, AM);
4561  ++PreIndexedNodes;
4562  ++NodesCombined;
4563  DOUT << "\nReplacing.4 "; DEBUG(N->dump(&DAG));
4564  DOUT << "\nWith: "; DEBUG(Result.getNode()->dump(&DAG));
4565  DOUT << '\n';
4566  WorkListRemover DeadNodes(*this);
4567  if (isLoad) {
4568    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
4569                                  &DeadNodes);
4570    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2),
4571                                  &DeadNodes);
4572  } else {
4573    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1),
4574                                  &DeadNodes);
4575  }
4576
4577  // Finally, since the node is now dead, remove it from the graph.
4578  DAG.DeleteNode(N);
4579
4580  // Replace the uses of Ptr with uses of the updated base value.
4581  DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0),
4582                                &DeadNodes);
4583  removeFromWorkList(Ptr.getNode());
4584  DAG.DeleteNode(Ptr.getNode());
4585
4586  return true;
4587}
4588
4589/// CombineToPostIndexedLoadStore - Try to combine a load / store with a
4590/// add / sub of the base pointer node into a post-indexed load / store.
4591/// After the transformation, the new indexed load / store has effectively
4592/// folded the add / subtract in, and all of the other uses of the add /
4593/// subtract are redirected to the new load / store.
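/// For example, with post-increment addressing:
///   val = LOAD base
///   inc = ADD base, offset    (plus other uses of inc)
/// becomes a post-indexed LOAD whose pointer result replaces the remaining
/// uses of the add.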
4594bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
4595  if (!LegalOperations)
4596    return false;
4597
4598  bool isLoad = true;
4599  SDValue Ptr;
4600  MVT VT;
4601  if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
4602    if (LD->isIndexed())
4603      return false;
4604    VT = LD->getMemoryVT();
4605    if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
4606        !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
4607      return false;
4608    Ptr = LD->getBasePtr();
4609  } else if (StoreSDNode *ST  = dyn_cast<StoreSDNode>(N)) {
4610    if (ST->isIndexed())
4611      return false;
4612    VT = ST->getMemoryVT();
4613    if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
4614        !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
4615      return false;
4616    Ptr = ST->getBasePtr();
4617    isLoad = false;
4618  } else {
4619    return false;
4620  }
4621
4622  if (Ptr.getNode()->hasOneUse())
4623    return false;
4624
4625  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
4626         E = Ptr.getNode()->use_end(); I != E; ++I) {
4627    SDNode *Op = *I;
4628    if (Op == N ||
4629        (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
4630      continue;
4631
4632    SDValue BasePtr;
4633    SDValue Offset;
4634    ISD::MemIndexedMode AM = ISD::UNINDEXED;
4635    if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
4636      if (Ptr == Offset)
4637        std::swap(BasePtr, Offset);
4638      if (Ptr != BasePtr)
4639        continue;
4640      // Don't create an indexed load / store with zero offset.
4641      if (isa<ConstantSDNode>(Offset) &&
4642          cast<ConstantSDNode>(Offset)->isNullValue())
4643        continue;
4644
4645      // Try turning it into a post-indexed load / store except when
4646      // 1) All uses are load / store ops that use it as base ptr.
4647      // 2) Op must be independent of N, i.e. Op is neither a predecessor
4648      //    nor a successor of N. Otherwise, if Op is folded that would
4649      //    create a cycle.
4650
4651      // Check for #1.
4652      bool TryNext = false;
4653      for (SDNode::use_iterator II = BasePtr.getNode()->use_begin(),
4654             EE = BasePtr.getNode()->use_end(); II != EE; ++II) {
4655        SDNode *Use = *II;
4656        if (Use == Ptr.getNode())
4657          continue;
4658
4659        // If all the uses are load / store addresses, then don't do the
4660        // transformation.
4661        if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
4662          bool RealUse = false;
4663          for (SDNode::use_iterator III = Use->use_begin(),
4664                 EEE = Use->use_end(); III != EEE; ++III) {
4665            SDNode *UseUse = *III;
4666            if (!((UseUse->getOpcode() == ISD::LOAD &&
4667                   cast<LoadSDNode>(UseUse)->getBasePtr().getNode() == Use) ||
4668                  (UseUse->getOpcode() == ISD::STORE &&
4669                   cast<StoreSDNode>(UseUse)->getBasePtr().getNode() == Use)))
4670              RealUse = true;
4671          }
4672
4673          if (!RealUse) {
4674            TryNext = true;
4675            break;
4676          }
4677        }
4678      }
4679
4680      if (TryNext)
4681        continue;
4682
4683      // Check for #2
4684      if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
4685        SDValue Result = isLoad
4686          ? DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
4687                               BasePtr, Offset, AM)
4688          : DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
4689                                BasePtr, Offset, AM);
4690        ++PostIndexedNodes;
4691        ++NodesCombined;
4692        DOUT << "\nReplacing.5 "; DEBUG(N->dump(&DAG));
4693        DOUT << "\nWith: "; DEBUG(Result.getNode()->dump(&DAG));
4694        DOUT << '\n';
4695        WorkListRemover DeadNodes(*this);
4696        if (isLoad) {
4697          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
4698                                        &DeadNodes);
4699          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2),
4700                                        &DeadNodes);
4701        } else {
4702          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1),
4703                                        &DeadNodes);
4704        }
4705
4706        // Finally, since the node is now dead, remove it from the graph.
4707        DAG.DeleteNode(N);
4708
4709        // Replace the uses of Use with uses of the updated base value.
4710        DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
4711                                      Result.getValue(isLoad ? 1 : 0),
4712                                      &DeadNodes);
4713        removeFromWorkList(Op);
4714        DAG.DeleteNode(Op);
4715        return true;
4716      }
4717    }
4718  }
4719
4720  return false;
4721}
4722
4723/// InferAlignment - If we can infer some alignment information from this
4724/// pointer, return it.
4725static unsigned InferAlignment(SDValue Ptr, SelectionDAG &DAG) {
4726  // If this is a direct reference to a stack slot, use information about the
4727  // stack slot's alignment.
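  // FrameIdx of 1 << 31 is used as a sentinel meaning "no frame index found".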
4728  int FrameIdx = 1 << 31;
4729  int64_t FrameOffset = 0;
4730  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
4731    FrameIdx = FI->getIndex();
4732  } else if (Ptr.getOpcode() == ISD::ADD &&
4733             isa<ConstantSDNode>(Ptr.getOperand(1)) &&
4734             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
4735    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4736    FrameOffset = Ptr.getConstantOperandVal(1);
4737  }
4738
4739  if (FrameIdx != (1 << 31)) {
4740    // FIXME: Handle FI+CST.
4741    const MachineFrameInfo &MFI = *DAG.getMachineFunction().getFrameInfo();
4742    if (MFI.isFixedObjectIndex(FrameIdx)) {
4743      int64_t ObjectOffset = MFI.getObjectOffset(FrameIdx) + FrameOffset;
4744
4745      // The alignment of the frame index can be determined from its offset from
4746      // the incoming frame position.  If the frame object is at offset 32 and
4747      // the stack is guaranteed to be 16-byte aligned, then we know that the
4748      // object is 16-byte aligned.
4749      unsigned StackAlign = DAG.getTarget().getFrameInfo()->getStackAlignment();
4750      unsigned Align = MinAlign(ObjectOffset, StackAlign);
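      // For example, a fixed object at offset 24 on a 16-byte aligned stack
      // gives MinAlign(24, 16) == 8, i.e. an 8-byte aligned pointer.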
4751
4752      // Finally, the frame object itself may have a known alignment.  Factor
4753      // the alignment + offset into a new alignment.  For example, if we know
4754      // the FI is 8-byte aligned, but the pointer is 4 off, we really have a
4755      // 4-byte alignment of the resultant pointer.  Likewise align 4 + 4-byte
4756      // offset = 4-byte alignment, align 4 + 1-byte offset = align 1, etc.
4757      unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
4758                                      FrameOffset);
4759      return std::max(Align, FIInfoAlign);
4760    }
4761  }
4762
4763  return 0;
4764}
4765
4766SDValue DAGCombiner::visitLOAD(SDNode *N) {
4767  LoadSDNode *LD  = cast<LoadSDNode>(N);
4768  SDValue Chain = LD->getChain();
4769  SDValue Ptr   = LD->getBasePtr();
4770
4771  // Try to infer better alignment information than the load already has.
4772  if (!Fast && LD->isUnindexed()) {
4773    if (unsigned Align = InferAlignment(Ptr, DAG)) {
4774      if (Align > LD->getAlignment())
4775        return DAG.getExtLoad(LD->getExtensionType(), N->getDebugLoc(),
4776                              LD->getValueType(0),
4777                              Chain, Ptr, LD->getSrcValue(),
4778                              LD->getSrcValueOffset(), LD->getMemoryVT(),
4779                              LD->isVolatile(), Align);
4780    }
4781  }
4782
4783  // If load is not volatile and there are no uses of the loaded value (and
4784  // the updated indexed value in case of indexed loads), change uses of the
4785  // chain value into uses of the chain input (i.e. delete the dead load).
4786  if (!LD->isVolatile()) {
4787    if (N->getValueType(1) == MVT::Other) {
4788      // Unindexed loads.
4789      if (N->hasNUsesOfValue(0, 0)) {
4790        // It's not safe to use the two value CombineTo variant here. e.g.
4791        // v1, chain2 = load chain1, loc
4792        // v2, chain3 = load chain2, loc
4793        // v3         = add v2, c
4794        // Now we replace use of chain2 with chain1.  This makes the second load
4795        // isomorphic to the one we are deleting, and thus makes this load live.
4796        DOUT << "\nReplacing.6 "; DEBUG(N->dump(&DAG));
4797        DOUT << "\nWith chain: "; DEBUG(Chain.getNode()->dump(&DAG));
4798        DOUT << "\n";
4799        WorkListRemover DeadNodes(*this);
4800        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain, &DeadNodes);
4801
4802        if (N->use_empty()) {
4803          removeFromWorkList(N);
4804          DAG.DeleteNode(N);
4805        }
4806
4807        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4808      }
4809    } else {
4810      // Indexed loads.
4811      assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
4812      if (N->hasNUsesOfValue(0, 0) && N->hasNUsesOfValue(0, 1)) {
4813        SDValue Undef = DAG.getNode(ISD::UNDEF, N->getDebugLoc(),
4814                                    N->getValueType(0));
4815        DOUT << "\nReplacing.6 "; DEBUG(N->dump(&DAG));
4816        DOUT << "\nWith: "; DEBUG(Undef.getNode()->dump(&DAG));
4817        DOUT << " and 2 other values\n";
4818        WorkListRemover DeadNodes(*this);
4819        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef, &DeadNodes);
4820        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1),
4821                                      DAG.getNode(ISD::UNDEF, N->getDebugLoc(),
4822                                                  N->getValueType(1)),
4823                                      &DeadNodes);
4824        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain, &DeadNodes);
4825        removeFromWorkList(N);
4826        DAG.DeleteNode(N);
4827        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4828      }
4829    }
4830  }
4831
4832  // If this load is directly stored, replace the load value with the stored
4833  // value.
4834  // TODO: Handle store large -> read small portion.
4835  // TODO: Handle TRUNCSTORE/LOADEXT
4836  if (LD->getExtensionType() == ISD::NON_EXTLOAD &&
4837      !LD->isVolatile()) {
4838    if (ISD::isNON_TRUNCStore(Chain.getNode())) {
4839      StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
4840      if (PrevST->getBasePtr() == Ptr &&
4841          PrevST->getValue().getValueType() == N->getValueType(0))
4842        return CombineTo(N, Chain.getOperand(1), Chain);
4843    }
4844  }
4845
4846  if (CombinerAA) {
4847    // Walk up chain skipping non-aliasing memory nodes.
4848    SDValue BetterChain = FindBetterChain(N, Chain);
4849
4850    // If there is a better chain.
4851    if (Chain != BetterChain) {
4852      SDValue ReplLoad;
4853
4854      // Replace the chain to avoid dependency.
4855      if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
4856        ReplLoad = DAG.getLoad(N->getValueType(0), LD->getDebugLoc(),
4857                               BetterChain, Ptr,
4858                               LD->getSrcValue(), LD->getSrcValueOffset(),
4859                               LD->isVolatile(), LD->getAlignment());
4860      } else {
4861        ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(),
4862                                  LD->getValueType(0),
4863                                  BetterChain, Ptr, LD->getSrcValue(),
4864                                  LD->getSrcValueOffset(),
4865                                  LD->getMemoryVT(),
4866                                  LD->isVolatile(),
4867                                  LD->getAlignment());
4868      }
4869
4870      // Create token factor to keep old chain connected.
4871      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
4872                                  MVT::Other, Chain, ReplLoad.getValue(1));
4873
4874      // Replace uses with load result and token factor. Don't add users
4875      // to work list.
4876      return CombineTo(N, ReplLoad.getValue(0), Token, false);
4877    }
4878  }
4879
4880  // Try transforming N to an indexed load.
4881  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
4882    return SDValue(N, 0);
4883
4884  return SDValue();
4885}
4886
4887SDValue DAGCombiner::visitSTORE(SDNode *N) {
4888  StoreSDNode *ST  = cast<StoreSDNode>(N);
4889  SDValue Chain = ST->getChain();
4890  SDValue Value = ST->getValue();
4891  SDValue Ptr   = ST->getBasePtr();
4892
4893  // Try to infer better alignment information than the store already has.
4894  if (!Fast && ST->isUnindexed()) {
4895    if (unsigned Align = InferAlignment(Ptr, DAG)) {
4896      if (Align > ST->getAlignment())
4897        return DAG.getTruncStore(Chain, N->getDebugLoc(), Value,
4898                                 Ptr, ST->getSrcValue(),
4899                                 ST->getSrcValueOffset(), ST->getMemoryVT(),
4900                                 ST->isVolatile(), Align);
4901    }
4902  }
4903
4904  // If this is a store of a bit convert, store the input value if the
4905  // resultant store does not need a higher alignment than the original.
4906  if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
4907      ST->isUnindexed()) {
4908    unsigned Align = ST->getAlignment();
4909    MVT SVT = Value.getOperand(0).getValueType();
4910    unsigned OrigAlign = TLI.getTargetData()->
4911      getABITypeAlignment(SVT.getTypeForMVT());
4912    if (Align <= OrigAlign &&
4913        ((!LegalOperations && !ST->isVolatile()) ||
4914         TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
4915      return DAG.getStore(Chain, N->getDebugLoc(), Value.getOperand(0),
4916                          Ptr, ST->getSrcValue(),
4917                          ST->getSrcValueOffset(), ST->isVolatile(), OrigAlign);
4918  }
4919
4920  // Turn 'store float 1.0, Ptr' -> 'store int 0x3F800000, Ptr'
4921  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
4922    // NOTE: If the original store is volatile, this transform must not increase
4923    // the number of stores.  For example, on x86-32 an f64 can be stored in one
4924    // processor operation but an i64 (which is not legal) requires two.  So the
4925    // transform should not be done in this case.
4926    if (Value.getOpcode() != ISD::TargetConstantFP) {
4927      SDValue Tmp;
4928      switch (CFP->getValueType(0).getSimpleVT()) {
4929      default: assert(0 && "Unknown FP type");
4930      case MVT::f80:    // We don't do this for these yet.
4931      case MVT::f128:
4932      case MVT::ppcf128:
4933        break;
4934      case MVT::f32:
4935        if (((TLI.isTypeLegal(MVT::i32) || !LegalTypes) && !LegalOperations &&
4936             !ST->isVolatile()) ||
4937            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
4938          Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
4939                              bitcastToAPInt().getZExtValue(), MVT::i32);
4940          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
4941                              Ptr, ST->getSrcValue(),
4942                              ST->getSrcValueOffset(), ST->isVolatile(),
4943                              ST->getAlignment());
4944        }
4945        break;
4946      case MVT::f64:
4947        if (((TLI.isTypeLegal(MVT::i64) || !LegalTypes) && !LegalOperations &&
4948             !ST->isVolatile()) ||
4949            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
4950          Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
4951                                getZExtValue(), MVT::i64);
4952          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
4953                              Ptr, ST->getSrcValue(),
4954                              ST->getSrcValueOffset(), ST->isVolatile(),
4955                              ST->getAlignment());
4956        } else if (!ST->isVolatile() &&
4957                   TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
4958          // Many FP stores are not made apparent until after legalize, e.g. for
4959          // argument passing.  Since this is so common, expand the 64-bit
4960          // integer store into two 32-bit stores here.
4961          uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
4962          SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32);
4963          SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
4964          if (TLI.isBigEndian()) std::swap(Lo, Hi);
4965
4966          int SVOffset = ST->getSrcValueOffset();
4967          unsigned Alignment = ST->getAlignment();
4968          bool isVolatile = ST->isVolatile();
4969
4970          SDValue St0 = DAG.getStore(Chain, ST->getDebugLoc(), Lo,
4971                                     Ptr, ST->getSrcValue(),
4972                                     ST->getSrcValueOffset(),
4973                                     isVolatile, ST->getAlignment());
4974          Ptr = DAG.getNode(ISD::ADD, N->getDebugLoc(), Ptr.getValueType(), Ptr,
4975                            DAG.getConstant(4, Ptr.getValueType()));
4976          SVOffset += 4;
4977          Alignment = MinAlign(Alignment, 4U);
4978          SDValue St1 = DAG.getStore(Chain, ST->getDebugLoc(), Hi,
4979                                     Ptr, ST->getSrcValue(),
4980                                     SVOffset, isVolatile, Alignment);
4981          return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
4982                             St0, St1);
4983        }
4984
4985        break;
4986      }
4987    }
4988  }
4989
4990  if (CombinerAA) {
4991    // Walk up chain skipping non-aliasing memory nodes.
4992    SDValue BetterChain = FindBetterChain(N, Chain);
4993
4994    // If there is a better chain.
4995    if (Chain != BetterChain) {
4996      // Replace the chain to avoid dependency.
4997      SDValue ReplStore;
4998      if (ST->isTruncatingStore()) {
4999        ReplStore = DAG.getTruncStore(BetterChain, N->getDebugLoc(), Value, Ptr,
5000                                      ST->getSrcValue(),ST->getSrcValueOffset(),
5001                                      ST->getMemoryVT(),
5002                                      ST->isVolatile(), ST->getAlignment());
5003      } else {
5004        ReplStore = DAG.getStore(BetterChain, N->getDebugLoc(), Value, Ptr,
5005                                 ST->getSrcValue(), ST->getSrcValueOffset(),
5006                                 ST->isVolatile(), ST->getAlignment());
5007      }
5008
5009      // Create token to keep both nodes around.
5010      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
5011                                  MVT::Other, Chain, ReplStore);
5012
5013      // Don't add users to work list.
5014      return CombineTo(N, Token, false);
5015    }
5016  }
5017
5018  // Try transforming N to an indexed store.
5019  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
5020    return SDValue(N, 0);
5021
5022  // FIXME: is there such a thing as a truncating indexed store?
5023  if (ST->isTruncatingStore() && ST->isUnindexed() &&
5024      Value.getValueType().isInteger()) {
5025    // See if we can simplify the input to this truncstore with knowledge that
5026    // only the low bits are being used.  For example:
5027    // "truncstore (or (shl x, 8), y), i8"  -> "truncstore y, i8"
5028    SDValue Shorter =
5029      GetDemandedBits(Value,
5030                      APInt::getLowBitsSet(Value.getValueSizeInBits(),
5031                                           ST->getMemoryVT().getSizeInBits()));
5032    AddToWorkList(Value.getNode());
5033    if (Shorter.getNode())
5034      return DAG.getTruncStore(Chain, N->getDebugLoc(), Shorter,
5035                               Ptr, ST->getSrcValue(),
5036                               ST->getSrcValueOffset(), ST->getMemoryVT(),
5037                               ST->isVolatile(), ST->getAlignment());
5038
5039    // Otherwise, see if we can simplify the operation with
5040    // SimplifyDemandedBits, which only works if the value has a single use.
5041    if (SimplifyDemandedBits(Value,
5042                             APInt::getLowBitsSet(
5043                               Value.getValueSizeInBits(),
5044                               ST->getMemoryVT().getSizeInBits())))
5045      return SDValue(N, 0);
5046  }
5047
5048  // If this is a load followed by a store to the same location, then the store
5049  // is dead/noop.
5050  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
5051    if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
5052        ST->isUnindexed() && !ST->isVolatile() &&
5053        // There can't be any side effects between the load and store, such as
5054        // a call or store.
5055        Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
5056      // The store is dead, remove it.
5057      return Chain;
5058    }
5059  }
5060
5061  // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
5062  // truncating store.  We can do this even if this is already a truncstore.
5063  if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
5064      && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
5065      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
5066                            ST->getMemoryVT())) {
5067    return DAG.getTruncStore(Chain, N->getDebugLoc(), Value.getOperand(0),
5068                             Ptr, ST->getSrcValue(),
5069                             ST->getSrcValueOffset(), ST->getMemoryVT(),
5070                             ST->isVolatile(), ST->getAlignment());
5071  }
5072
5073  return SDValue();
5074}
5075
5076SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
5077  SDValue InVec = N->getOperand(0);
5078  SDValue InVal = N->getOperand(1);
5079  SDValue EltNo = N->getOperand(2);
5080
5081  // If the invec is a BUILD_VECTOR and if EltNo is a constant, build a new
5082  // vector with the inserted element.
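  // For example, inserting X at index 2 of (build_vector a, b, c, d) gives
  // (build_vector a, b, X, d).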
5083  if (InVec.getOpcode() == ISD::BUILD_VECTOR && isa<ConstantSDNode>(EltNo)) {
5084    unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
5085    SmallVector<SDValue, 8> Ops(InVec.getNode()->op_begin(),
5086                                InVec.getNode()->op_end());
5087    if (Elt < Ops.size())
5088      Ops[Elt] = InVal;
5089    return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
5090                       InVec.getValueType(), &Ops[0], Ops.size());
5091  }
5092
5093  return SDValue();
5094}
5095
5096SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
5097  // fold (vextract (scalar_to_vector val), 0) -> val
5098  SDValue InVec = N->getOperand(0);
5099
5100  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR)
5101    return InVec.getOperand(0);
5102
5103  // Perform only after legalization to ensure build_vector / vector_shuffle
5104  // optimizations have already been done.
5105  if (!LegalOperations) return SDValue();
5106
5107  // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
5108  // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
5109  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
5110  SDValue EltNo = N->getOperand(1);
5111
5112  if (isa<ConstantSDNode>(EltNo)) {
5113    unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
5114    bool NewLoad = false;
5115    bool BCNumEltsChanged = false;
5116    MVT VT = InVec.getValueType();
5117    MVT EVT = VT.getVectorElementType();
5118    MVT LVT = EVT;
5119
5120    if (InVec.getOpcode() == ISD::BIT_CONVERT) {
5121      MVT BCVT = InVec.getOperand(0).getValueType();
5122      if (!BCVT.isVector() || EVT.bitsGT(BCVT.getVectorElementType()))
5123        return SDValue();
5124      if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
5125        BCNumEltsChanged = true;
5126      InVec = InVec.getOperand(0);
5127      EVT = BCVT.getVectorElementType();
5128      NewLoad = true;
5129    }
5130
5131    LoadSDNode *LN0 = NULL;
5132    if (ISD::isNormalLoad(InVec.getNode())) {
5133      LN0 = cast<LoadSDNode>(InVec);
5134    } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
5135               InVec.getOperand(0).getValueType() == EVT &&
5136               ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
5137      LN0 = cast<LoadSDNode>(InVec.getOperand(0));
5138    } else if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE) {
5139      // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
5140      // =>
5141      // (load $addr+1*size)
5142
5143      // If the bit convert changed the number of elements, it is unsafe
5144      // to examine the mask.
5145      if (BCNumEltsChanged)
5146        return SDValue();
5147      unsigned Idx = cast<ConstantSDNode>(InVec.getOperand(2).
5148                                          getOperand(Elt))->getZExtValue();
5149      unsigned NumElems = InVec.getOperand(2).getNumOperands();
5150      InVec = (Idx < NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
5151      if (InVec.getOpcode() == ISD::BIT_CONVERT)
5152        InVec = InVec.getOperand(0);
5153      if (ISD::isNormalLoad(InVec.getNode())) {
5154        LN0 = cast<LoadSDNode>(InVec);
5155        Elt = (Idx < NumElems) ? Idx : Idx - NumElems;
5156      }
5157    }
5158
5159    if (!LN0 || !LN0->hasOneUse() || LN0->isVolatile())
5160      return SDValue();
5161
5162    unsigned Align = LN0->getAlignment();
5163    if (NewLoad) {
5164      // Check the resultant load doesn't need a higher alignment than the
5165      // original load.
5166      unsigned NewAlign =
5167        TLI.getTargetData()->getABITypeAlignment(LVT.getTypeForMVT());
5168
5169      if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
5170        return SDValue();
5171
5172      Align = NewAlign;
5173    }
5174
5175    SDValue NewPtr = LN0->getBasePtr();
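    // Offset the base pointer to the element being extracted, scaled by the
    // size of the loaded element type.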
5176    if (Elt) {
5177      unsigned PtrOff = LVT.getSizeInBits() * Elt / 8;
5178      MVT PtrType = NewPtr.getValueType();
5179      if (TLI.isBigEndian())
5180        PtrOff = VT.getSizeInBits() / 8 - PtrOff;
5181      NewPtr = DAG.getNode(ISD::ADD, N->getDebugLoc(), PtrType, NewPtr,
5182                           DAG.getConstant(PtrOff, PtrType));
5183    }
5184
5185    return DAG.getLoad(LVT, N->getDebugLoc(), LN0->getChain(), NewPtr,
5186                       LN0->getSrcValue(), LN0->getSrcValueOffset(),
5187                       LN0->isVolatile(), Align);
5188  }
5189
5190  return SDValue();
5191}
5192
5193SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
5194  unsigned NumInScalars = N->getNumOperands();
5195  MVT VT = N->getValueType(0);
5196  unsigned NumElts = VT.getVectorNumElements();
5197  MVT EltType = VT.getVectorElementType();
5198
5199  // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
5200  // operations.  If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
5201  // at most two distinct vectors, turn this into a shuffle node.
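  // For example, (build_vector (extract_vector_elt A, 0),
  //                            (extract_vector_elt B, 1))
  // can become a vector_shuffle of A and B with mask <0, 3>, where indices
  // into the second vector are offset by the number of elements.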
5202  SDValue VecIn1, VecIn2;
5203  for (unsigned i = 0; i != NumInScalars; ++i) {
5204    // Ignore undef inputs.
5205    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
5206
5207    // If this input is something other than a EXTRACT_VECTOR_ELT with a
5208    // constant index, bail out.
5209    if (N->getOperand(i).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5210        !isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) {
5211      VecIn1 = VecIn2 = SDValue(0, 0);
5212      break;
5213    }
5214
5215    // If the input vector type disagrees with the result of the build_vector,
5216    // we can't make a shuffle.
5217    SDValue ExtractedFromVec = N->getOperand(i).getOperand(0);
5218    if (ExtractedFromVec.getValueType() != VT) {
5219      VecIn1 = VecIn2 = SDValue(0, 0);
5220      break;
5221    }
5222
5223    // Otherwise, remember this.  We allow up to two distinct input vectors.
5224    if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
5225      continue;
5226
5227    if (VecIn1.getNode() == 0) {
5228      VecIn1 = ExtractedFromVec;
5229    } else if (VecIn2.getNode() == 0) {
5230      VecIn2 = ExtractedFromVec;
5231    } else {
5232      // Too many inputs.
5233      VecIn1 = VecIn2 = SDValue(0, 0);
5234      break;
5235    }
5236  }
5237
5238  // If everything is good, we can make a shuffle operation.
5239  if (VecIn1.getNode()) {
5240    SmallVector<SDValue, 8> BuildVecIndices;
5241    for (unsigned i = 0; i != NumInScalars; ++i) {
5242      if (N->getOperand(i).getOpcode() == ISD::UNDEF) {
5243        BuildVecIndices.push_back(DAG.getNode(ISD::UNDEF,
5244                                              N->getDebugLoc(),
5245                                              TLI.getPointerTy()));
5246        continue;
5247      }
5248
5249      SDValue Extract = N->getOperand(i);
5250
5251      // If extracting from the first vector, just use the index directly.
5252      if (Extract.getOperand(0) == VecIn1) {
5253        BuildVecIndices.push_back(Extract.getOperand(1));
5254        continue;
5255      }
5256
5257      // Otherwise, use InIdx + VecSize
5258      unsigned Idx =
5259        cast<ConstantSDNode>(Extract.getOperand(1))->getZExtValue();
5260      BuildVecIndices.push_back(DAG.getIntPtrConstant(Idx+NumInScalars));
5261    }
5262
5263    // Compute the type for the shuffle mask and make sure it is legal.
5264    MVT BuildVecVT = MVT::getVectorVT(TLI.getPointerTy(), NumElts);
5265    if (!TLI.isTypeLegal(BuildVecVT) && LegalTypes)
5266      return SDValue();
5267
5268    // Return the new VECTOR_SHUFFLE node.
5269    SDValue Ops[5];
5270    Ops[0] = VecIn1;
5271    if (VecIn2.getNode()) {
5272      Ops[1] = VecIn2;
5273    } else {
5274      // Use an undef build_vector as input for the second operand.
5275      std::vector<SDValue> UnOps(NumInScalars,
5276                                 DAG.getNode(ISD::UNDEF, N->getDebugLoc(),
5277                                             EltType));
5278      Ops[1] = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
5279                           &UnOps[0], UnOps.size());
5280      AddToWorkList(Ops[1].getNode());
5281    }
5282
5283    Ops[2] = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), BuildVecVT,
5284                         &BuildVecIndices[0], BuildVecIndices.size());
5285    return DAG.getNode(ISD::VECTOR_SHUFFLE, N->getDebugLoc(), VT, Ops, 3);
5286  }
5287
5288  return SDValue();
5289}
5290
5291SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
5292  // TODO: Check to see if this is a CONCAT_VECTORS of a bunch of
5293  // EXTRACT_SUBVECTOR operations.  If so, and if the EXTRACT_SUBVECTOR vector
5294  // inputs come from at most two distinct vectors, turn this into a shuffle
5295  // node.
5296
5297  // If we only have one input vector, we don't need to do any concatenation.
5298  if (N->getNumOperands() == 1)
5299    return N->getOperand(0);
5300
5301  return SDValue();
5302}
5303
5304SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
5305  SDValue ShufMask = N->getOperand(2);
5306  unsigned NumElts = ShufMask.getNumOperands();
5307
5308  SDValue N0 = N->getOperand(0);
5309  SDValue N1 = N->getOperand(1);
5310
5311  assert(N0.getValueType().getVectorNumElements() == NumElts &&
5312        "Vector shuffle must be normalized in DAG");
5313
5314  // If the shuffle mask is an identity operation on the LHS, return the LHS.
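  // e.g. a mask of <0, 1, 2, 3> (with undefs allowed in any position) leaves
  // the LHS unchanged.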
5315  bool isIdentity = true;
5316  for (unsigned i = 0; i != NumElts; ++i) {
5317    if (ShufMask.getOperand(i).getOpcode() != ISD::UNDEF &&
5318        cast<ConstantSDNode>(ShufMask.getOperand(i))->getZExtValue() != i) {
5319      isIdentity = false;
5320      break;
5321    }
5322  }
5323  if (isIdentity) return N->getOperand(0);
5324
5325  // If the shuffle mask is an identity operation on the RHS, return the RHS.
5326  isIdentity = true;
5327  for (unsigned i = 0; i != NumElts; ++i) {
5328    if (ShufMask.getOperand(i).getOpcode() != ISD::UNDEF &&
5329        cast<ConstantSDNode>(ShufMask.getOperand(i))->getZExtValue() !=
5330          i+NumElts) {
5331      isIdentity = false;
5332      break;
5333    }
5334  }
5335  if (isIdentity) return N->getOperand(1);
5336
5337  // Check if the shuffle is a unary shuffle, i.e. one of the vectors is not
5338  // needed at all.
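  // e.g. for four elements, a mask such as <0, 3, 1, 2> only references the
  // first input, while <5, 7, 4, 6> only references the second.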
5339  bool isUnary = true;
5340  bool isSplat = true;
5341  int VecNum = -1;
5342  unsigned BaseIdx = 0;
5343  for (unsigned i = 0; i != NumElts; ++i)
5344    if (ShufMask.getOperand(i).getOpcode() != ISD::UNDEF) {
5345      unsigned Idx=cast<ConstantSDNode>(ShufMask.getOperand(i))->getZExtValue();
5346      int V = (Idx < NumElts) ? 0 : 1;
5347      if (VecNum == -1) {
5348        VecNum = V;
5349        BaseIdx = Idx;
5350      } else {
5351        if (BaseIdx != Idx)
5352          isSplat = false;
5353        if (VecNum != V) {
5354          isUnary = false;
5355          break;
5356        }
5357      }
5358    }
5359
5360  // Normalize unary shuffle so the RHS is undef.
5361  if (isUnary && VecNum == 1)
5362    std::swap(N0, N1);
5363
5364  // If it is a splat, check if the argument vector is a build_vector with
5365  // all scalar elements the same.
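  // e.g. a splat of (build_vector X, X, X, X) is just that build_vector, so
  // the shuffle can be dropped entirely.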
5366  if (isSplat) {
5367    SDNode *V = N0.getNode();
5368
5369    // If this is a bit convert that changes the element type of the vector but
5370    // not the number of vector elements, look through it.  Be careful not to
5371    // look through conversions that change things like v4f32 to v2f64.
5372    if (V->getOpcode() == ISD::BIT_CONVERT) {
5373      SDValue ConvInput = V->getOperand(0);
5374      if (ConvInput.getValueType().isVector() &&
5375          ConvInput.getValueType().getVectorNumElements() == NumElts)
5376        V = ConvInput.getNode();
5377    }
5378
5379    if (V->getOpcode() == ISD::BUILD_VECTOR) {
5380      unsigned NumElems = V->getNumOperands();
5381      if (NumElems > BaseIdx) {
5382        SDValue Base;
5383        bool AllSame = true;
5384        for (unsigned i = 0; i != NumElems; ++i) {
5385          if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
5386            Base = V->getOperand(i);
5387            break;
5388          }
5389        }
5390        // Splat of <u, u, u, u>, return <u, u, u, u>
5391        if (!Base.getNode())
5392          return N0;
5393        for (unsigned i = 0; i != NumElems; ++i) {
5394          if (V->getOperand(i) != Base) {
5395            AllSame = false;
5396            break;
5397          }
5398        }
5399        // Splat of <x, x, x, x>, return <x, x, x, x>
5400        if (AllSame)
5401          return N0;
5402      }
5403    }
5404  }
5405
5406  // If it is a unary shuffle, or the LHS and the RHS are the same node, turn
5407  // the RHS into an undef.
5408  if (isUnary || N0 == N1) {
5409    // Check the SHUFFLE mask, mapping any inputs from the 2nd operand into the
5410    // first operand.
5411    SmallVector<SDValue, 8> MappedOps;
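    // e.g. with four elements and N0 == N1, a mask of <4, 1, 6, 3> becomes
    // <0, 1, 2, 3> and the second operand is replaced by undef.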
5412
5413    for (unsigned i = 0; i != NumElts; ++i) {
5414      if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF ||
5415          cast<ConstantSDNode>(ShufMask.getOperand(i))->getZExtValue() <
5416            NumElts) {
5417        MappedOps.push_back(ShufMask.getOperand(i));
5418      } else {
5419        unsigned NewIdx =
5420          cast<ConstantSDNode>(ShufMask.getOperand(i))->getZExtValue() -
5421          NumElts;
5422        MappedOps.push_back(DAG.getConstant(NewIdx,
5423                                        ShufMask.getOperand(i).getValueType()));
5424      }
5425    }
5426
5427    ShufMask = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
5428                           ShufMask.getValueType(),
5429                           &MappedOps[0], MappedOps.size());
5430    AddToWorkList(ShufMask.getNode());
5431    return DAG.getNode(ISD::VECTOR_SHUFFLE, N->getDebugLoc(),
5432                       N->getValueType(0), N0,
5433                       DAG.getNode(ISD::UNDEF, N->getDebugLoc(),
5434                                   N->getValueType(0)),
5435                       ShufMask);
5436  }
5437
5438  return SDValue();
5439}
5440
5441/// XformToShuffleWithZero - Returns a vector_shuffle if it is able to transform
5442/// an AND to a vector_shuffle with the destination vector and a zero vector.
5443/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0> ==>
5444///      vector_shuffle V, Zero, <0, 4, 2, 4>
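/// Mask indices >= the element count select from the zero vector, so lanes
/// whose AND mask is zero are cleared while all-ones lanes pass through.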
5445SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
5446  SDValue LHS = N->getOperand(0);
5447  SDValue RHS = N->getOperand(1);
5448  if (N->getOpcode() == ISD::AND) {
5449    if (RHS.getOpcode() == ISD::BIT_CONVERT)
5450      RHS = RHS.getOperand(0);
5451    if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
5452      std::vector<SDValue> IdxOps;
5453      unsigned NumOps = RHS.getNumOperands();
5454      unsigned NumElts = NumOps;
5455      for (unsigned i = 0; i != NumElts; ++i) {
5456        SDValue Elt = RHS.getOperand(i);
5457        if (!isa<ConstantSDNode>(Elt))
5458          return SDValue();
5459        else if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
5460          IdxOps.push_back(DAG.getIntPtrConstant(i));
5461        else if (cast<ConstantSDNode>(Elt)->isNullValue())
5462          IdxOps.push_back(DAG.getIntPtrConstant(NumElts));
5463        else
5464          return SDValue();
5465      }
5466
5467      // Let's see if the target supports this vector_shuffle.
5468      if (!TLI.isVectorClearMaskLegal(IdxOps, TLI.getPointerTy(), DAG))
5469        return SDValue();
5470
5471      // Return the new VECTOR_SHUFFLE node.
5472      MVT EVT = RHS.getValueType().getVectorElementType();
5473      MVT VT = MVT::getVectorVT(EVT, NumElts);
5474      MVT MaskVT = MVT::getVectorVT(TLI.getPointerTy(), NumElts);
5475      std::vector<SDValue> Ops;
5476      LHS = DAG.getNode(ISD::BIT_CONVERT, LHS.getDebugLoc(), VT, LHS);
5477      Ops.push_back(LHS);
5478      AddToWorkList(LHS.getNode());
5479      std::vector<SDValue> ZeroOps(NumElts, DAG.getConstant(0, EVT));
5480      Ops.push_back(DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
5481                                VT, &ZeroOps[0], ZeroOps.size()));
5482      Ops.push_back(DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
5483                                MaskVT, &IdxOps[0], IdxOps.size()));
5484      SDValue Result = DAG.getNode(ISD::VECTOR_SHUFFLE, N->getDebugLoc(),
5485                                   VT, &Ops[0], Ops.size());
5486
5487      if (VT != N->getValueType(0))
5488        Result = DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
5489                             N->getValueType(0), Result);
5490
5491      return Result;
5492    }
5493  }
5494
5495  return SDValue();
5496}
5497
5498/// SimplifyVBinOp - Visit a binary vector operation, like ADD.
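/// For example, (add (build_vector 1, 2, 3, 4), (build_vector 4, 3, 2, 1))
/// can be constant folded into (build_vector 5, 5, 5, 5).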
5499SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
5500  // After legalize, the target may be depending on adds and other
5501  // binary ops to provide legal ways to construct constants or other
5502  // things. Simplifying them may result in a loss of legality.
5503  if (LegalOperations) return SDValue();
5504
5505  MVT VT = N->getValueType(0);
5506  assert(VT.isVector() && "SimplifyVBinOp only works on vectors!");
5507
5508  MVT EltType = VT.getVectorElementType();
5509  SDValue LHS = N->getOperand(0);
5510  SDValue RHS = N->getOperand(1);
5511  SDValue Shuffle = XformToShuffleWithZero(N);
5512  if (Shuffle.getNode()) return Shuffle;
5513
5514  // If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold
5515  // this operation.
5516  if (LHS.getOpcode() == ISD::BUILD_VECTOR &&
5517      RHS.getOpcode() == ISD::BUILD_VECTOR) {
5518    SmallVector<SDValue, 8> Ops;
5519    for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
5520      SDValue LHSOp = LHS.getOperand(i);
5521      SDValue RHSOp = RHS.getOperand(i);
5522      // If these two elements can't be folded, bail out.
5523      if ((LHSOp.getOpcode() != ISD::UNDEF &&
5524           LHSOp.getOpcode() != ISD::Constant &&
5525           LHSOp.getOpcode() != ISD::ConstantFP) ||
5526          (RHSOp.getOpcode() != ISD::UNDEF &&
5527           RHSOp.getOpcode() != ISD::Constant &&
5528           RHSOp.getOpcode() != ISD::ConstantFP))
5529        break;
5530
5531      // Can't fold divide by zero.
5532      if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV ||
5533          N->getOpcode() == ISD::FDIV) {
5534        if ((RHSOp.getOpcode() == ISD::Constant &&
5535             cast<ConstantSDNode>(RHSOp.getNode())->isNullValue()) ||
5536            (RHSOp.getOpcode() == ISD::ConstantFP &&
5537             cast<ConstantFPSDNode>(RHSOp.getNode())->getValueAPF().isZero()))
5538          break;
5539      }
5540
5541      Ops.push_back(DAG.getNode(N->getOpcode(), LHS.getDebugLoc(),
5542                                EltType, LHSOp, RHSOp));
5543      AddToWorkList(Ops.back().getNode());
5544      assert((Ops.back().getOpcode() == ISD::UNDEF ||
5545              Ops.back().getOpcode() == ISD::Constant ||
5546              Ops.back().getOpcode() == ISD::ConstantFP) &&
5547             "Scalar binop didn't fold!");
5548    }
5549
5550    if (Ops.size() == LHS.getNumOperands()) {
5551      MVT VT = LHS.getValueType();
5552      return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
5553                         &Ops[0], Ops.size());
5554    }
5555  }
5556
5557  return SDValue();
5558}
5559
5560SDValue DAGCombiner::SimplifySelect(DebugLoc DL, SDValue N0,
5561                                    SDValue N1, SDValue N2){
5562  assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");
5563
5564  SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
5565                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());
5566
5567  // If we got a simplified select_cc node back from SimplifySelectCC, then
5568  // break it down into a new SETCC node, and a new SELECT node, and then return
5569  // the SELECT node, since we were called with a SELECT node.
5570  if (SCC.getNode()) {
5571    // Check to see if we got a select_cc back (to turn into setcc/select).
5572    // Otherwise, just return whatever node we got back, like fabs.
5573    if (SCC.getOpcode() == ISD::SELECT_CC) {
5574      SDValue SETCC = DAG.getNode(ISD::SETCC, N0.getDebugLoc(),
5575                                  N0.getValueType(),
5576                                  SCC.getOperand(0), SCC.getOperand(1),
5577                                  SCC.getOperand(4));
5578      AddToWorkList(SETCC.getNode());
5579      return DAG.getNode(ISD::SELECT, SCC.getDebugLoc(), SCC.getValueType(),
5580                         SCC.getOperand(2), SCC.getOperand(3), SETCC);
5581    }
5582
5583    return SCC;
5584  }
5585  return SDValue();
5586}
5587
5588/// SimplifySelectOps - Given a SELECT or a SELECT_CC node, where LHS and RHS
5589/// are the two values being selected between, see if we can simplify the
5590/// select.  Callers of this should assume that TheSelect is deleted if this
5591/// returns true.  As such, they should return the appropriate thing (e.g. the
5592/// node) back to the top-level of the DAG combiner loop to avoid it being
5593/// looked at.
5594bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
5595                                    SDValue RHS) {
5596
5597  // If this is a select from two identical things, try to pull the operation
5598  // through the select.
5599  if (LHS.getOpcode() == RHS.getOpcode() && LHS.hasOneUse() && RHS.hasOneUse()){
5600    // If this is a load and the token chain is identical, replace the select
5601    // of two loads with a load through a select of the address to load from.
5602    // This triggers in things like "select bool X, 10.0, 123.0" after the FP
5603    // constants have been dropped into the constant pool.
5604    if (LHS.getOpcode() == ISD::LOAD &&
5605        // Do not let this transformation reduce the number of volatile loads.
5606        !cast<LoadSDNode>(LHS)->isVolatile() &&
5607        !cast<LoadSDNode>(RHS)->isVolatile() &&
5608        // Token chains must be identical.
5609        LHS.getOperand(0) == RHS.getOperand(0)) {
5610      LoadSDNode *LLD = cast<LoadSDNode>(LHS);
5611      LoadSDNode *RLD = cast<LoadSDNode>(RHS);
5612
5613      // If this is an EXTLOAD, the VTs must match.
5614      if (LLD->getMemoryVT() == RLD->getMemoryVT()) {
5615        // FIXME: this conflates two src values, discarding one.  This is not
5616        // the right thing to do, but nothing uses srcvalues now.  When they do,
5617        // turn SrcValue into a list of locations.
5618        SDValue Addr;
5619        if (TheSelect->getOpcode() == ISD::SELECT) {
5620          // Check that the condition doesn't reach either load.  If so, folding
5621          // this will induce a cycle into the DAG.
5622          if (!LLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
5623              !RLD->isPredecessorOf(TheSelect->getOperand(0).getNode())) {
5624            Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
5625                               LLD->getBasePtr().getValueType(),
5626                               TheSelect->getOperand(0), LLD->getBasePtr(),
5627                               RLD->getBasePtr());
5628          }
5629        } else {
5630          // Check that the condition doesn't reach either load.  If so, folding
5631          // this will induce a cycle into the DAG.
5632          if (!LLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
5633              !RLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
5634              !LLD->isPredecessorOf(TheSelect->getOperand(1).getNode()) &&
5635              !RLD->isPredecessorOf(TheSelect->getOperand(1).getNode())) {
5636            Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
5637                               LLD->getBasePtr().getValueType(),
5638                               TheSelect->getOperand(0),
5639                               TheSelect->getOperand(1),
5640                               LLD->getBasePtr(), RLD->getBasePtr(),
5641                               TheSelect->getOperand(4));
5642          }
5643        }
5644
5645        if (Addr.getNode()) {
5646          SDValue Load;
5647          if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
5648            Load = DAG.getLoad(TheSelect->getValueType(0),
5649                               TheSelect->getDebugLoc(),
5650                               LLD->getChain(),
5651                               Addr,LLD->getSrcValue(),
5652                               LLD->getSrcValueOffset(),
5653                               LLD->isVolatile(),
5654                               LLD->getAlignment());
5655          } else {
5656            Load = DAG.getExtLoad(LLD->getExtensionType(),
5657                                  TheSelect->getDebugLoc(),
5658                                  TheSelect->getValueType(0),
5659                                  LLD->getChain(), Addr, LLD->getSrcValue(),
5660                                  LLD->getSrcValueOffset(),
5661                                  LLD->getMemoryVT(),
5662                                  LLD->isVolatile(),
5663                                  LLD->getAlignment());
5664          }
5665
5666          // Users of the select now use the result of the load.
5667          CombineTo(TheSelect, Load);
5668
5669          // Users of the old loads now use the new load's chain.  We know the
5670          // old-load value is dead now.
5671          CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
5672          CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
5673          return true;
5674        }
5675      }
5676    }
5677  }
5678
5679  return false;
5680}
5681
5682SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
5683                                      SDValue N2, SDValue N3,
5684                                      ISD::CondCode CC, bool NotExtCompare) {
5685  MVT VT = N2.getValueType();
5686  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
5687  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
5688  ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.getNode());
5689
5690  // Determine if the condition we're dealing with is constant
5691  SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()),
5692                              N0, N1, CC, false);
5693  if (SCC.getNode()) AddToWorkList(SCC.getNode());
5694  ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode());
5695
5696  // fold select_cc true, x, y -> x
5697  if (SCCC && !SCCC->isNullValue())
5698    return N2;
5699  // fold select_cc false, x, y -> y
5700  if (SCCC && SCCC->isNullValue())
5701    return N3;
5702
5703  // Check to see if we can simplify the select into an fabs node
5704  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
5705    // Allow either -0.0 or 0.0
5706    if (CFP->getValueAPF().isZero()) {
5707      // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
5708      if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
5709          N0 == N2 && N3.getOpcode() == ISD::FNEG &&
5710          N2 == N3.getOperand(0))
5711        return DAG.getNode(ISD::FABS, DL, VT, N0);
5712
5713      // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
5714      if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
5715          N0 == N3 && N2.getOpcode() == ISD::FNEG &&
5716          N2.getOperand(0) == N3)
5717        return DAG.getNode(ISD::FABS, DL, VT, N3);
5718    }
5719  }
5720
5721  // Check to see if we can perform the "gzip trick", transforming
5722  // (select_cc setlt X, 0, A, 0) -> (and (sra X, size(X)-1), A)
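  // e.g. for i32: (select_cc setlt X, 0, A, 0) -> (and (sra X, 31), A), since
  // (sra X, 31) is all ones exactly when X is negative and zero otherwise.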
5723  if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
5724      N0.getValueType().isInteger() &&
5725      N2.getValueType().isInteger() &&
5726      (N1C->isNullValue() ||                         // (a < 0) ? b : 0
5727       (N1C->getAPIntValue() == 1 && N0 == N2))) {   // (a < 1) ? a : 0
5728    MVT XType = N0.getValueType();
5729    MVT AType = N2.getValueType();
5730    if (XType.bitsGE(AType)) {
5731      // (and (sra X, size(X)-1), A) -> "(and (srl X, C2), A)" iff A is a
5732      // single-bit constant.
5733      if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) {
5734        unsigned ShCtV = N2C->getAPIntValue().logBase2();
5735        ShCtV = XType.getSizeInBits()-ShCtV-1;
5736        SDValue ShCt = DAG.getConstant(ShCtV, getShiftAmountTy());
5737        SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(),
5738                                    XType, N0, ShCt);
5739        AddToWorkList(Shift.getNode());
5740
5741        if (XType.bitsGT(AType)) {
5742          Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
5743          AddToWorkList(Shift.getNode());
5744        }
5745
5746        return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
5747      }
5748
5749      SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(),
5750                                  XType, N0,
5751                                  DAG.getConstant(XType.getSizeInBits()-1,
5752                                                  getShiftAmountTy()));
5753      AddToWorkList(Shift.getNode());
5754
5755      if (XType.bitsGT(AType)) {
5756        Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
5757        AddToWorkList(Shift.getNode());
5758      }
5759
5760      return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
5761    }
5762  }
5763
5764  // fold select C, 16, 0 -> shl C, 4
5765  if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
5766      TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent) {
5767
5768    // If the caller doesn't want us to simplify this into a zext of a compare,
5769    // don't do it.
5770    if (NotExtCompare && N2C->getAPIntValue() == 1)
5771      return SDValue();
5772
5773    // Get a SetCC of the condition
5774    // FIXME: Should probably make sure that setcc is legal if we ever have a
5775    // target where it isn't.
5776    SDValue Temp, SCC;
5777    // cast from setcc result type to select result type
5778    if (LegalTypes) {
5779      SCC  = DAG.getSetCC(DL, TLI.getSetCCResultType(N0.getValueType()),
5780                          N0, N1, CC);
5781      if (N2.getValueType().bitsLT(SCC.getValueType()))
5782        Temp = DAG.getZeroExtendInReg(SCC, N2.getDebugLoc(), N2.getValueType());
5783      else
5784        Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
5785                           N2.getValueType(), SCC);
5786    } else {
5787      SCC  = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC);
5788      Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
5789                         N2.getValueType(), SCC);
5790    }
5791
5792    AddToWorkList(SCC.getNode());
5793    AddToWorkList(Temp.getNode());
5794
5795    if (N2C->getAPIntValue() == 1)
5796      return Temp;
5797
5798    // shl setcc result by log2 n2c
5799    return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
5800                       DAG.getConstant(N2C->getAPIntValue().logBase2(),
5801                                       getShiftAmountTy()));
5802  }
5803
5804  // Check to see if this is the equivalent of setcc
5805  // FIXME: Turn all of these into setcc if setcc if setcc is legal
5806  // FIXME: Turn all of these into setcc if setcc is legal;
5807  if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) {
5808    MVT XType = N0.getValueType();
5809    if (!LegalOperations ||
5810        TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(XType))) {
5811      SDValue Res = DAG.getSetCC(DL, TLI.getSetCCResultType(XType), N0, N1, CC);
5812      if (Res.getValueType() != VT)
5813        Res = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Res);
5814      return Res;
5815    }
5816
5817    // fold (seteq X, 0) -> (srl (ctlz X), log2(size(X)))
5818    if (N1C && N1C->isNullValue() && CC == ISD::SETEQ &&
5819        (!LegalOperations ||
5820         TLI.isOperationLegal(ISD::CTLZ, XType))) {
5821      SDValue Ctlz = DAG.getNode(ISD::CTLZ, N0.getDebugLoc(), XType, N0);
5822      return DAG.getNode(ISD::SRL, DL, XType, Ctlz,
5823                         DAG.getConstant(Log2_32(XType.getSizeInBits()),
5824                                         getShiftAmountTy()));
5825    }
5826    // fold (setgt X, 0) -> (srl (and -X, ~X), size(X)-1)
5827    if (N1C && N1C->isNullValue() && CC == ISD::SETGT) {
5828      SDValue NegN0 = DAG.getNode(ISD::SUB, N0.getDebugLoc(),
5829                                  XType, DAG.getConstant(0, XType), N0);
5830      SDValue NotN0 = DAG.getNOT(N0.getDebugLoc(), N0, XType);
5831      return DAG.getNode(ISD::SRL, DL, XType,
5832                         DAG.getNode(ISD::AND, DL, XType, NegN0, NotN0),
5833                         DAG.getConstant(XType.getSizeInBits()-1,
5834                                         getShiftAmountTy()));
5835    }
5836    // fold (setgt X, -1) -> (xor (srl X, size(X)-1), 1)
5837    if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
5838      SDValue Sign = DAG.getNode(ISD::SRL, N0.getDebugLoc(), XType, N0,
5839                                 DAG.getConstant(XType.getSizeInBits()-1,
5840                                                 getShiftAmountTy()));
5841      return DAG.getNode(ISD::XOR, DL, XType, Sign, DAG.getConstant(1, XType));
5842    }
5843  }
5844
5845  // Check to see if this is an integer abs. select_cc setl[te] X, 0, -X, X ->
5846  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
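  // For i32 this is the classic branch-free abs: Y = (sra X, 31);
  // abs(X) = (add X, Y) xor Y.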
5847  if (N1C && N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE) &&
5848      N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) &&
5849      N2.getOperand(0) == N1 && N0.getValueType().isInteger()) {
5850    MVT XType = N0.getValueType();
5851    SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType, N0,
5852                                DAG.getConstant(XType.getSizeInBits()-1,
5853                                                getShiftAmountTy()));
5854    SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(), XType,
5855                              N0, Shift);
5856    AddToWorkList(Shift.getNode());
5857    AddToWorkList(Add.getNode());
5858    return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
5859  }
5860  // Check to see if this is an integer abs. select_cc setgt X, -1, X, -X ->
5861  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
5862  if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT &&
5863      N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) {
5864    if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) {
5865      MVT XType = N0.getValueType();
5866      if (SubC->isNullValue() && XType.isInteger()) {
5867        SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
5868                                    N0,
5869                                    DAG.getConstant(XType.getSizeInBits()-1,
5870                                                    getShiftAmountTy()));
5871        SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
5872                                  XType, N0, Shift);
5873        AddToWorkList(Shift.getNode());
5874        AddToWorkList(Add.getNode());
5875        return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
5876      }
5877    }
5878  }
5879
5880  return SDValue();
5881}
5882
5883/// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC.
5884SDValue DAGCombiner::SimplifySetCC(MVT VT, SDValue N0,
5885                                   SDValue N1, ISD::CondCode Cond,
5886                                   bool foldBooleans) {
5887  TargetLowering::DAGCombinerInfo
5888    DagCombineInfo(DAG, Level == Unrestricted, false, this);
5889  return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo);
5890}
5891
5892/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant,
5893/// return a DAG expression to select that will generate the same value by
5894/// multiplying by a magic number.  See:
5895/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
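/// For example, a signed i32 divide by 3 can roughly become a MULHS by the
/// magic constant 0x55555556 plus a small fix-up sequence; the exact sequence
/// is produced by TLI.BuildSDIV.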
5896SDValue DAGCombiner::BuildSDIV(SDNode *N) {
5897  std::vector<SDNode*> Built;
5898  SDValue S = TLI.BuildSDIV(N, DAG, &Built);
5899
5900  for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
5901       ii != ee; ++ii)
5902    AddToWorkList(*ii);
5903  return S;
5904}
5905
5906/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant,
5907/// return a DAG expression to select that will generate the same value by
5908/// multiplying by a magic number.  See:
5909/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
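/// For example, an unsigned i32 divide by 3 can roughly become a MULHU by
/// 0xAAAAAAAB followed by a logical shift right by 1; the exact sequence is
/// produced by TLI.BuildUDIV.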
5910SDValue DAGCombiner::BuildUDIV(SDNode *N) {
5911  std::vector<SDNode*> Built;
5912  SDValue S = TLI.BuildUDIV(N, DAG, &Built);
5913
5914  for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
5915       ii != ee; ++ii)
5916    AddToWorkList(*ii);
5917  return S;
5918}
5919
5920/// FindBaseOffset - Return true if base is known not to alias with anything
5921/// but itself.  Provides base object and offset as results.
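/// e.g. for Ptr = (add FrameIndex<1>, 8) this sets Base = FrameIndex<1>,
/// Offset = 8, and returns true, since a frame index cannot alias any other
/// base object.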
5922static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset) {
5923  // Assume it is a primitive operation.
5924  Base = Ptr; Offset = 0;
5925
5926  // If it's adding a simple constant then integrate the offset.
5927  if (Base.getOpcode() == ISD::ADD) {
5928    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
5929      Base = Base.getOperand(0);
5930      Offset += C->getZExtValue();
5931    }
5932  }
5933
5934  // If it's any of the following then it can't alias with anything but itself.
5935  return isa<FrameIndexSDNode>(Base) ||
5936         isa<ConstantPoolSDNode>(Base) ||
5937         isa<GlobalAddressSDNode>(Base);
5938}
5939
5940/// isAlias - Return true if there is any possibility that the two addresses
5941/// overlap.
5942bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
5943                          const Value *SrcValue1, int SrcValueOffset1,
5944                          SDValue Ptr2, int64_t Size2,
5945                          const Value *SrcValue2, int SrcValueOffset2) const {
5946  // If they are the same then they must be aliases.
5947  if (Ptr1 == Ptr2) return true;
5948
5949  // Gather base node and offset information.
5950  SDValue Base1, Base2;
5951  int64_t Offset1, Offset2;
5952  bool KnownBase1 = FindBaseOffset(Ptr1, Base1, Offset1);
5953  bool KnownBase2 = FindBaseOffset(Ptr2, Base2, Offset2);
5954
5955  // If they have the same base address then...
5956  if (Base1 == Base2)
5957    // Check to see if the addresses overlap.
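    // i.e. test whether the half-open ranges [Offset1, Offset1+Size1) and
    // [Offset2, Offset2+Size2) intersect.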
5958    return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
5959
5960  // If we know both bases then they can't alias.
5961  if (KnownBase1 && KnownBase2) return false;
5962
5963  if (CombinerGlobalAA) {
5964    // Use alias analysis information.
5965    int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2);
5966    int64_t Overlap1 = Size1 + SrcValueOffset1 - MinOffset;
5967    int64_t Overlap2 = Size2 + SrcValueOffset2 - MinOffset;
5968    AliasAnalysis::AliasResult AAResult =
5969                             AA.alias(SrcValue1, Overlap1, SrcValue2, Overlap2);
5970    if (AAResult == AliasAnalysis::NoAlias)
5971      return false;
5972  }
5973
5974  // Otherwise we have to assume they alias.
5975  return true;
5976}
5977
5978/// FindAliasInfo - Extracts the relevant alias information from the memory
5979/// node.  Returns true if the operand was a load.
5980bool DAGCombiner::FindAliasInfo(SDNode *N,
5981                        SDValue &Ptr, int64_t &Size,
5982                        const Value *&SrcValue, int &SrcValueOffset) const {
5983  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
5984    Ptr = LD->getBasePtr();
5985    Size = LD->getMemoryVT().getSizeInBits() >> 3;
5986    SrcValue = LD->getSrcValue();
5987    SrcValueOffset = LD->getSrcValueOffset();
5988    return true;
5989  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
5990    Ptr = ST->getBasePtr();
5991    Size = ST->getMemoryVT().getSizeInBits() >> 3;
5992    SrcValue = ST->getSrcValue();
5993    SrcValueOffset = ST->getSrcValueOffset();
5994  } else {
5995    assert(0 && "FindAliasInfo expected a memory operand");
5996  }
5997
5998  return false;
5999}
6000
6001/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
6002/// looking for aliasing nodes and adding them to the Aliases vector.
6003void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
6004                                   SmallVector<SDValue, 8> &Aliases) {
6005  SmallVector<SDValue, 8> Chains;     // List of chains to visit.
6006  std::set<SDNode *> Visited;           // Visited node set.
6007
6008  // Get alias information for node.
6009  SDValue Ptr;
6010  int64_t Size;
6011  const Value *SrcValue;
6012  int SrcValueOffset;
6013  bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset);
6014
6015  // Starting off.
6016  Chains.push_back(OriginalChain);
6017
6018  // Look at each chain and determine if it is an alias.  If so, add it to the
6019  // aliases list.  If not, then continue up the chain looking for the next
6020  // candidate.
6021  while (!Chains.empty()) {
6022    SDValue Chain = Chains.back();
6023    Chains.pop_back();
6024
6025    // Don't bother if we've been here before.
6026    if (Visited.find(Chain.getNode()) != Visited.end()) continue;
6027    Visited.insert(Chain.getNode());
6028
6029    switch (Chain.getOpcode()) {
6030    case ISD::EntryToken:
6031      // Entry token is ideal chain operand, but handled in FindBetterChain.
6032      break;
6033
6034    case ISD::LOAD:
6035    case ISD::STORE: {
6036      // Get alias information for Chain.
6037      SDValue OpPtr;
6038      int64_t OpSize;
6039      const Value *OpSrcValue;
6040      int OpSrcValueOffset;
6041      bool IsOpLoad = FindAliasInfo(Chain.getNode(), OpPtr, OpSize,
6042                                    OpSrcValue, OpSrcValueOffset);
6043
6044      // If the chain is an alias then stop here.
6045      if (!(IsLoad && IsOpLoad) &&
6046          isAlias(Ptr, Size, SrcValue, SrcValueOffset,
6047                  OpPtr, OpSize, OpSrcValue, OpSrcValueOffset)) {
6048        Aliases.push_back(Chain);
6049      } else {
6050        // Look further up the chain.
6051        Chains.push_back(Chain.getOperand(0));
6052        // Clean up old chain.
6053        AddToWorkList(Chain.getNode());
6054      }
6055      break;
6056    }
6057
6058    case ISD::TokenFactor:
6059      // We have to check each of the operands of the token factor, so we queue
6060      // them up.  Adding the operands to the queue (stack) in reverse order
6061      // maintains the original order and increases the likelihood that getNode
6062      // will find a matching token factor (CSE).
6063      for (unsigned n = Chain.getNumOperands(); n;)
6064        Chains.push_back(Chain.getOperand(--n));
6065      // Eliminate the token factor if we can.
6066      AddToWorkList(Chain.getNode());
6067      break;
6068
6069    default:
6070      // For all other instructions we will just have to take what we can get.
6071      Aliases.push_back(Chain);
6072      break;
6073    }
6074  }
6075}
6076
6077/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking
6078/// for a better chain (aliasing node).
6079SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
6080  SmallVector<SDValue, 8> Aliases;  // Ops for replacing token factor.
6081
6082  // Accumulate all the aliases to this node.
6083  GatherAllAliases(N, OldChain, Aliases);
6084
6085  if (Aliases.size() == 0) {
6086    // If no operands then chain to entry token.
6087    return DAG.getEntryNode();
6088  } else if (Aliases.size() == 1) {
6089    // If a single operand then chain to it.  We don't need to revisit it.
6090    return Aliases[0];
6091  }
6092
6093  // Construct a custom tailored token factor.
6094  SDValue NewChain = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
6095                                 &Aliases[0], Aliases.size());
6096
6097  // Make sure the old chain gets cleaned up.
6098  if (NewChain != OldChain) AddToWorkList(OldChain.getNode());
6099
6100  return NewChain;
6101}
6102
6103// SelectionDAG::Combine - This is the entry point for the file.
6104//
6105void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA, bool Fast) {
6106  // All of the work is done by the DAGCombiner::Run method,
6107  // invoked below.
6108  DAGCombiner(*this, AA, Fast).Run(Level);
6109}
6110