DAGCombiner.cpp revision 16c29b5f285f375be53dabaa73e3e91107485fe4
1//===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass combines dag nodes to form fewer, simpler DAG nodes.  It can be run
11// both before and after the DAG is legalized.
12//
13// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
14// primarily intended to handle simplification opportunities that are implicit
15// in the LLVM IR and exposed by the various codegen lowering phases.
16//
17//===----------------------------------------------------------------------===//
18
19#define DEBUG_TYPE "dagcombine"
20#include "llvm/CodeGen/SelectionDAG.h"
21#include "llvm/DerivedTypes.h"
22#include "llvm/LLVMContext.h"
23#include "llvm/CodeGen/MachineFunction.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/PseudoSourceValue.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Target/TargetData.h"
28#include "llvm/Target/TargetLowering.h"
29#include "llvm/Target/TargetMachine.h"
30#include "llvm/Target/TargetOptions.h"
31#include "llvm/ADT/SmallPtrSet.h"
32#include "llvm/ADT/Statistic.h"
33#include "llvm/Support/CommandLine.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/ErrorHandling.h"
36#include "llvm/Support/MathExtras.h"
37#include "llvm/Support/raw_ostream.h"
38#include <algorithm>
39using namespace llvm;
40
41STATISTIC(NodesCombined   , "Number of dag nodes combined");
42STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
43STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
44STATISTIC(OpsNarrowed     , "Number of load/op/store narrowed");
45
46namespace {
47  static cl::opt<bool>
48    CombinerAA("combiner-alias-analysis", cl::Hidden,
49               cl::desc("Turn on alias analysis during testing"));
50
51  static cl::opt<bool>
52    CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
53               cl::desc("Include global information in alias analysis"));
54
55//------------------------------ DAGCombiner ---------------------------------//
56
57  class DAGCombiner {
58    SelectionDAG &DAG;
59    const TargetLowering &TLI;
60    CombineLevel Level;
61    CodeGenOpt::Level OptLevel;
62    bool LegalOperations;
63    bool LegalTypes;
64
65    // Worklist of all of the nodes that need to be simplified.
66    std::vector<SDNode*> WorkList;
67
68    // AA - Used for DAG load/store alias analysis.
69    AliasAnalysis &AA;
70
71    /// AddUsersToWorkList - When an instruction is simplified, add all users of
72    /// the instruction to the worklist, because they might now be simplified
73    /// further.
74    ///
75    void AddUsersToWorkList(SDNode *N) {
76      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
77           UI != UE; ++UI)
78        AddToWorkList(*UI);
79    }
80
81    /// visit - call the node-specific routine that knows how to fold each
82    /// particular type of node.
83    SDValue visit(SDNode *N);
84
85  public:
86    /// AddToWorkList - Add to the work list, making sure its instance is at
87    /// the back (next to be processed).
88    void AddToWorkList(SDNode *N) {
89      removeFromWorkList(N);
90      WorkList.push_back(N);
91    }
92
93    /// removeFromWorkList - remove all instances of N from the worklist.
94    ///
95    void removeFromWorkList(SDNode *N) {
96      WorkList.erase(std::remove(WorkList.begin(), WorkList.end(), N),
97                     WorkList.end());
98    }
99
100    SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
101                      bool AddTo = true);
102
103    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
104      return CombineTo(N, &Res, 1, AddTo);
105    }
106
107    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
108                      bool AddTo = true) {
109      SDValue To[] = { Res0, Res1 };
110      return CombineTo(N, To, 2, AddTo);
111    }
112
113    void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);
114
115  private:
116
117    /// SimplifyDemandedBits - Check the specified integer node value to see if
118    /// it can be simplified or if things it uses can be simplified by bit
119    /// propagation.  If so, return true.
120    bool SimplifyDemandedBits(SDValue Op) {
121      unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
122      APInt Demanded = APInt::getAllOnesValue(BitWidth);
123      return SimplifyDemandedBits(Op, Demanded);
124    }
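    // Illustrative example (not target-specific): for (and x:i32, 0xFFFF) where
    // only the low 8 bits of the result are ever demanded, SimplifyDemandedBits
    // can shrink the mask constant to 0xFF, and the AND can vanish entirely if
    // those bits of x are already known to be zero.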
125
126    bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);
127
128    bool CombineToPreIndexedLoadStore(SDNode *N);
129    bool CombineToPostIndexedLoadStore(SDNode *N);
130
131    void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
132    SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
133    SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
134    SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
135    SDValue PromoteIntBinOp(SDValue Op);
136    SDValue PromoteIntShiftOp(SDValue Op);
137    SDValue PromoteExtend(SDValue Op);
138    bool PromoteLoad(SDValue Op);
139
140    /// combine - call the node-specific routine that knows how to fold each
141    /// particular type of node. If that doesn't do anything, try the
142    /// target-specific DAG combines.
143    SDValue combine(SDNode *N);
144
145    // Visitation implementation - Implement dag node combining for different
146    // node types.  The semantics are as follows:
147    // Return Value:
148    //   SDValue.getNode() == 0 - No change was made
149    //   SDValue.getNode() == N - N was replaced, is dead and has been handled.
150    //   otherwise              - N should be replaced by the returned Operand.
151    //
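    // For illustration only: a visit routine that folds (xor x, 0) -> x simply
    // returns N->getOperand(0); one that rewires N in place through CombineTo
    // returns SDValue(N, 0) to report that N has been handled; and one that
    // finds nothing to do returns the null SDValue().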
152    SDValue visitTokenFactor(SDNode *N);
153    SDValue visitMERGE_VALUES(SDNode *N);
154    SDValue visitADD(SDNode *N);
155    SDValue visitSUB(SDNode *N);
156    SDValue visitADDC(SDNode *N);
157    SDValue visitADDE(SDNode *N);
158    SDValue visitMUL(SDNode *N);
159    SDValue visitSDIV(SDNode *N);
160    SDValue visitUDIV(SDNode *N);
161    SDValue visitSREM(SDNode *N);
162    SDValue visitUREM(SDNode *N);
163    SDValue visitMULHU(SDNode *N);
164    SDValue visitMULHS(SDNode *N);
165    SDValue visitSMUL_LOHI(SDNode *N);
166    SDValue visitUMUL_LOHI(SDNode *N);
167    SDValue visitSDIVREM(SDNode *N);
168    SDValue visitUDIVREM(SDNode *N);
169    SDValue visitAND(SDNode *N);
170    SDValue visitOR(SDNode *N);
171    SDValue visitXOR(SDNode *N);
172    SDValue SimplifyVBinOp(SDNode *N);
173    SDValue visitSHL(SDNode *N);
174    SDValue visitSRA(SDNode *N);
175    SDValue visitSRL(SDNode *N);
176    SDValue visitCTLZ(SDNode *N);
177    SDValue visitCTTZ(SDNode *N);
178    SDValue visitCTPOP(SDNode *N);
179    SDValue visitSELECT(SDNode *N);
180    SDValue visitSELECT_CC(SDNode *N);
181    SDValue visitSETCC(SDNode *N);
182    SDValue visitSIGN_EXTEND(SDNode *N);
183    SDValue visitZERO_EXTEND(SDNode *N);
184    SDValue visitANY_EXTEND(SDNode *N);
185    SDValue visitSIGN_EXTEND_INREG(SDNode *N);
186    SDValue visitTRUNCATE(SDNode *N);
187    SDValue visitBITCAST(SDNode *N);
188    SDValue visitBUILD_PAIR(SDNode *N);
189    SDValue visitFADD(SDNode *N);
190    SDValue visitFSUB(SDNode *N);
191    SDValue visitFMUL(SDNode *N);
192    SDValue visitFDIV(SDNode *N);
193    SDValue visitFREM(SDNode *N);
194    SDValue visitFCOPYSIGN(SDNode *N);
195    SDValue visitSINT_TO_FP(SDNode *N);
196    SDValue visitUINT_TO_FP(SDNode *N);
197    SDValue visitFP_TO_SINT(SDNode *N);
198    SDValue visitFP_TO_UINT(SDNode *N);
199    SDValue visitFP_ROUND(SDNode *N);
200    SDValue visitFP_ROUND_INREG(SDNode *N);
201    SDValue visitFP_EXTEND(SDNode *N);
202    SDValue visitFNEG(SDNode *N);
203    SDValue visitFABS(SDNode *N);
204    SDValue visitBRCOND(SDNode *N);
205    SDValue visitBR_CC(SDNode *N);
206    SDValue visitLOAD(SDNode *N);
207    SDValue visitSTORE(SDNode *N);
208    SDValue visitINSERT_VECTOR_ELT(SDNode *N);
209    SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
210    SDValue visitBUILD_VECTOR(SDNode *N);
211    SDValue visitCONCAT_VECTORS(SDNode *N);
212    SDValue visitVECTOR_SHUFFLE(SDNode *N);
213    SDValue visitMEMBARRIER(SDNode *N);
214
215    SDValue XformToShuffleWithZero(SDNode *N);
216    SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);
217
218    SDValue visitShiftByConstant(SDNode *N, unsigned Amt);
219
220    bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
221    SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
222    SDValue SimplifySelect(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2);
223    SDValue SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2,
224                             SDValue N3, ISD::CondCode CC,
225                             bool NotExtCompare = false);
226    SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
227                          DebugLoc DL, bool foldBooleans = true);
228    SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
229                                         unsigned HiOp);
230    SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
231    SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
232    SDValue BuildSDIV(SDNode *N);
233    SDValue BuildUDIV(SDNode *N);
234    SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
235    SDValue ReduceLoadWidth(SDNode *N);
236    SDValue ReduceLoadOpStoreWidth(SDNode *N);
237
238    SDValue GetDemandedBits(SDValue V, const APInt &Mask);
239
240    /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
241    /// looking for aliasing nodes and adding them to the Aliases vector.
242    void GatherAllAliases(SDNode *N, SDValue OriginalChain,
243                          SmallVector<SDValue, 8> &Aliases);
244
245    /// isAlias - Return true if there is any possibility that the two addresses
246    /// overlap.
247    bool isAlias(SDValue Ptr1, int64_t Size1,
248                 const Value *SrcValue1, int SrcValueOffset1,
249                 unsigned SrcValueAlign1,
250                 const MDNode *TBAAInfo1,
251                 SDValue Ptr2, int64_t Size2,
252                 const Value *SrcValue2, int SrcValueOffset2,
253                 unsigned SrcValueAlign2,
254                 const MDNode *TBAAInfo2) const;
255
256    /// FindAliasInfo - Extracts the relevant alias information from the memory
257    /// node.  Returns true if the operand was a load.
258    bool FindAliasInfo(SDNode *N,
259                       SDValue &Ptr, int64_t &Size,
260                       const Value *&SrcValue, int &SrcValueOffset,
261                       unsigned &SrcValueAlignment,
262                       const MDNode *&TBAAInfo) const;
263
264    /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
265    /// looking for a better chain (aliasing node).
266    SDValue FindBetterChain(SDNode *N, SDValue Chain);
267
268  public:
269    DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
270      : DAG(D), TLI(D.getTargetLoweringInfo()), Level(Unrestricted),
271        OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {}
272
273    /// Run - runs the dag combiner on all nodes in the work list
274    void Run(CombineLevel AtLevel);
275
276    SelectionDAG &getDAG() const { return DAG; }
277
278    /// getShiftAmountTy - Returns a type large enough to hold any valid
279    /// shift amount - before type legalization these can be huge.
280    EVT getShiftAmountTy() {
281      return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
282    }
283
284    /// isTypeLegal - This method returns true if we are running before type
285    /// legalization or if the specified VT is legal.
286    bool isTypeLegal(const EVT &VT) {
287      if (!LegalTypes) return true;
288      return TLI.isTypeLegal(VT);
289    }
290  };
291}
292
293
294namespace {
295/// WorkListRemover - This class is a DAGUpdateListener that removes any deleted
296/// nodes from the worklist.
297class WorkListRemover : public SelectionDAG::DAGUpdateListener {
298  DAGCombiner &DC;
299public:
300  explicit WorkListRemover(DAGCombiner &dc) : DC(dc) {}
301
302  virtual void NodeDeleted(SDNode *N, SDNode *E) {
303    DC.removeFromWorkList(N);
304  }
305
306  virtual void NodeUpdated(SDNode *N) {
307    // Ignore updates.
308  }
309};
310}
311
312//===----------------------------------------------------------------------===//
313//  TargetLowering::DAGCombinerInfo implementation
314//===----------------------------------------------------------------------===//
315
316void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
317  ((DAGCombiner*)DC)->AddToWorkList(N);
318}
319
320SDValue TargetLowering::DAGCombinerInfo::
321CombineTo(SDNode *N, const std::vector<SDValue> &To, bool AddTo) {
322  return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
323}
324
325SDValue TargetLowering::DAGCombinerInfo::
326CombineTo(SDNode *N, SDValue Res, bool AddTo) {
327  return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
328}
329
330
331SDValue TargetLowering::DAGCombinerInfo::
332CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
333  return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
334}
335
336void TargetLowering::DAGCombinerInfo::
337CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
338  return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
339}
340
341//===----------------------------------------------------------------------===//
342// Helper Functions
343//===----------------------------------------------------------------------===//
344
345/// isNegatibleForFree - Return 1 if we can compute the negated form of the
346/// specified expression for the same cost as the expression itself, 2 if we can
347/// compute the negated form more cheaply, or 0 if it cannot be negated for free.
348static char isNegatibleForFree(SDValue Op, bool LegalOperations,
349                               unsigned Depth = 0) {
350  // No compile time optimizations on this type.
351  if (Op.getValueType() == MVT::ppcf128)
352    return 0;
353
354  // fneg is removable even if it has multiple uses.
355  if (Op.getOpcode() == ISD::FNEG) return 2;
356
357  // Don't allow anything with multiple uses.
358  if (!Op.hasOneUse()) return 0;
359
360  // Don't recurse exponentially.
361  if (Depth > 6) return 0;
362
363  switch (Op.getOpcode()) {
364  default: return 0;
365  case ISD::ConstantFP:
366    // Don't invert constant FP values after legalize.  The negated constant
367    // isn't necessarily legal.
368    return LegalOperations ? 0 : 1;
369  case ISD::FADD:
370    // FIXME: determine better conditions for this xform.
371    if (!UnsafeFPMath) return 0;
372
373    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
374    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
375      return V;
376    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
377    return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1);
378  case ISD::FSUB:
379    // We can't turn -(A-B) into B-A when we honor signed zeros.
380    if (!UnsafeFPMath) return 0;
381
382    // fold (fneg (fsub A, B)) -> (fsub B, A)
383    return 1;
384
385  case ISD::FMUL:
386  case ISD::FDIV:
387    if (HonorSignDependentRoundingFPMath()) return 0;
388
389    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
390    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
391      return V;
392
393    return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1);
394
395  case ISD::FP_EXTEND:
396  case ISD::FP_ROUND:
397  case ISD::FSIN:
398    return isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1);
399  }
400}
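// Worked example (illustrative): with UnsafeFPMath set, for
//   Op = (fadd (fneg A), B)
// operand 0 is an FNEG and is therefore negatible "for free" (value 2), so the
// whole FADD reports 2; GetNegatedExpression below rebuilds it as (fsub A, B),
// which is no more expensive than the original expression.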
401
402/// GetNegatedExpression - If isNegatibleForFree returned a non-zero value, this
403/// function returns the newly negated expression.
404static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
405                                    bool LegalOperations, unsigned Depth = 0) {
406  // fneg is removable even if it has multiple uses.
407  if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);
408
409  // Don't allow anything with multiple uses.
410  assert(Op.hasOneUse() && "Unknown reuse!");
411
412  assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
413  switch (Op.getOpcode()) {
414  default: llvm_unreachable("Unknown code");
415  case ISD::ConstantFP: {
416    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
417    V.changeSign();
418    return DAG.getConstantFP(V, Op.getValueType());
419  }
420  case ISD::FADD:
421    // FIXME: determine better conditions for this xform.
422    assert(UnsafeFPMath);
423
424    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
425    if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
426      return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
427                         GetNegatedExpression(Op.getOperand(0), DAG,
428                                              LegalOperations, Depth+1),
429                         Op.getOperand(1));
430    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
431    return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
432                       GetNegatedExpression(Op.getOperand(1), DAG,
433                                            LegalOperations, Depth+1),
434                       Op.getOperand(0));
435  case ISD::FSUB:
436    // We can't turn -(A-B) into B-A when we honor signed zeros.
437    assert(UnsafeFPMath);
438
439    // fold (fneg (fsub 0, B)) -> B
440    if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
441      if (N0CFP->getValueAPF().isZero())
442        return Op.getOperand(1);
443
444    // fold (fneg (fsub A, B)) -> (fsub B, A)
445    return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
446                       Op.getOperand(1), Op.getOperand(0));
447
448  case ISD::FMUL:
449  case ISD::FDIV:
450    assert(!HonorSignDependentRoundingFPMath());
451
452    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
453    if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
454      return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
455                         GetNegatedExpression(Op.getOperand(0), DAG,
456                                              LegalOperations, Depth+1),
457                         Op.getOperand(1));
458
459    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
460    return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
461                       Op.getOperand(0),
462                       GetNegatedExpression(Op.getOperand(1), DAG,
463                                            LegalOperations, Depth+1));
464
465  case ISD::FP_EXTEND:
466  case ISD::FSIN:
467    return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
468                       GetNegatedExpression(Op.getOperand(0), DAG,
469                                            LegalOperations, Depth+1));
470  case ISD::FP_ROUND:
471      return DAG.getNode(ISD::FP_ROUND, Op.getDebugLoc(), Op.getValueType(),
472                         GetNegatedExpression(Op.getOperand(0), DAG,
473                                              LegalOperations, Depth+1),
474                         Op.getOperand(1));
475  }
476}
477
478
479// isSetCCEquivalent - Return true if this node is a setcc, or is a select_cc
480// that selects between the values 1 and 0, making it equivalent to a setcc.
481// Also, set the incoming LHS, RHS, and CC references to the appropriate
482// nodes based on the type of node we are checking.  This simplifies life a
483// bit for the callers.
484static bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
485                              SDValue &CC) {
486  if (N.getOpcode() == ISD::SETCC) {
487    LHS = N.getOperand(0);
488    RHS = N.getOperand(1);
489    CC  = N.getOperand(2);
490    return true;
491  }
492  if (N.getOpcode() == ISD::SELECT_CC &&
493      N.getOperand(2).getOpcode() == ISD::Constant &&
494      N.getOperand(3).getOpcode() == ISD::Constant &&
495      cast<ConstantSDNode>(N.getOperand(2))->getAPIntValue() == 1 &&
496      cast<ConstantSDNode>(N.getOperand(3))->isNullValue()) {
497    LHS = N.getOperand(0);
498    RHS = N.getOperand(1);
499    CC  = N.getOperand(4);
500    return true;
501  }
502  return false;
503}
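// Illustrative example: (select_cc a, b, 1, 0, setlt) behaves exactly like
// (setcc a, b, setlt); the helper hands back LHS = a, RHS = b and CC = setlt so
// callers can treat both forms uniformly.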
504
505// isOneUseSetCC - Return true if this is a SetCC-equivalent operation with only
506// one use.  If this is true, it allows the users to invert the operation for
507// free when it is profitable to do so.
508static bool isOneUseSetCC(SDValue N) {
509  SDValue N0, N1, N2;
510  if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
511    return true;
512  return false;
513}
514
515SDValue DAGCombiner::ReassociateOps(unsigned Opc, DebugLoc DL,
516                                    SDValue N0, SDValue N1) {
517  EVT VT = N0.getValueType();
518  if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) {
519    if (isa<ConstantSDNode>(N1)) {
520      // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
521      SDValue OpNode =
522        DAG.FoldConstantArithmetic(Opc, VT,
523                                   cast<ConstantSDNode>(N0.getOperand(1)),
524                                   cast<ConstantSDNode>(N1));
525      return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
526    } else if (N0.hasOneUse()) {
527      // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff (op x, c1) has one use
528      SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
529                                   N0.getOperand(0), N1);
530      AddToWorkList(OpNode.getNode());
531      return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
532    }
533  }
534
535  if (N1.getOpcode() == Opc && isa<ConstantSDNode>(N1.getOperand(1))) {
536    if (isa<ConstantSDNode>(N0)) {
537      // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
538      SDValue OpNode =
539        DAG.FoldConstantArithmetic(Opc, VT,
540                                   cast<ConstantSDNode>(N1.getOperand(1)),
541                                   cast<ConstantSDNode>(N0));
542      return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
543    } else if (N1.hasOneUse()) {
544      // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff (op x, c1) has one use
545      SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
546                                   N1.getOperand(0), N0);
547      AddToWorkList(OpNode.getNode());
548      return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
549    }
550  }
551
552  return SDValue();
553}
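// Illustrative instances of the reassociations above (decimal constants, any
// integer VT):
//   (add (add x, 3), 5) -> (add x, 8)           // both constants folded
//   (add (add x, 3), y) -> (add (add x, y), 3)  // only if (add x, 3) has one use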
554
555SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
556                               bool AddTo) {
557  assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
558  ++NodesCombined;
559  DEBUG(dbgs() << "\nReplacing.1 ";
560        N->dump(&DAG);
561        dbgs() << "\nWith: ";
562        To[0].getNode()->dump(&DAG);
563        dbgs() << " and " << NumTo-1 << " other values\n";
564        for (unsigned i = 0, e = NumTo; i != e; ++i)
565          assert((!To[i].getNode() ||
566                  N->getValueType(i) == To[i].getValueType()) &&
567                 "Cannot combine value to value of different type!"));
568  WorkListRemover DeadNodes(*this);
569  DAG.ReplaceAllUsesWith(N, To, &DeadNodes);
570
571  if (AddTo) {
572    // Push the new nodes and any users onto the worklist
573    for (unsigned i = 0, e = NumTo; i != e; ++i) {
574      if (To[i].getNode()) {
575        AddToWorkList(To[i].getNode());
576        AddUsersToWorkList(To[i].getNode());
577      }
578    }
579  }
580
581  // Finally, if the node is now dead, remove it from the graph.  The node
582  // may not be dead if the replacement process recursively simplified to
583  // something else needing this node.
584  if (N->use_empty()) {
585    // Nodes can be reintroduced into the worklist.  Make sure we do not
586    // process a node that has been replaced.
587    removeFromWorkList(N);
588
589    // Finally, since the node is now dead, remove it from the graph.
590    DAG.DeleteNode(N);
591  }
592  return SDValue(N, 0);
593}
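// Typical multi-result use (illustrative): replacing a LOAD, which produces a
// { value, chain } pair, calls CombineTo(N, NewValue, NewChain); this rewires
// both results, queues the replacements and their users for revisiting, and
// deletes N once nothing refers to it any more.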
594
595void DAGCombiner::
596CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
597  // Replace all uses.  If any nodes become isomorphic to other nodes and
598  // are deleted, make sure to remove them from our worklist.
599  WorkListRemover DeadNodes(*this);
600  DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New, &DeadNodes);
601
602  // Push the new node and any (possibly new) users onto the worklist.
603  AddToWorkList(TLO.New.getNode());
604  AddUsersToWorkList(TLO.New.getNode());
605
606  // Finally, if the node is now dead, remove it from the graph.  The node
607  // may not be dead if the replacement process recursively simplified to
608  // something else needing this node.
609  if (TLO.Old.getNode()->use_empty()) {
610    removeFromWorkList(TLO.Old.getNode());
611
612    // If the operands of this node are only used by the node, they will now
613    // be dead.  Make sure to visit them first to delete dead nodes early.
614    for (unsigned i = 0, e = TLO.Old.getNode()->getNumOperands(); i != e; ++i)
615      if (TLO.Old.getNode()->getOperand(i).getNode()->hasOneUse())
616        AddToWorkList(TLO.Old.getNode()->getOperand(i).getNode());
617
618    DAG.DeleteNode(TLO.Old.getNode());
619  }
620}
621
622/// SimplifyDemandedBits - Check the specified integer node value to see if
623/// it can be simplified or if things it uses can be simplified by bit
624/// propagation.  If so, return true.
625bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
626  TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
627  APInt KnownZero, KnownOne;
628  if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
629    return false;
630
631  // Revisit the node.
632  AddToWorkList(Op.getNode());
633
634  // Replace the old value with the new one.
635  ++NodesCombined;
636  DEBUG(dbgs() << "\nReplacing.2 ";
637        TLO.Old.getNode()->dump(&DAG);
638        dbgs() << "\nWith: ";
639        TLO.New.getNode()->dump(&DAG);
640        dbgs() << '\n');
641
642  CommitTargetLoweringOpt(TLO);
643  return true;
644}
645
646void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
647  DebugLoc dl = Load->getDebugLoc();
648  EVT VT = Load->getValueType(0);
649  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, SDValue(ExtLoad, 0));
650
651  DEBUG(dbgs() << "\nReplacing.9 ";
652        Load->dump(&DAG);
653        dbgs() << "\nWith: ";
654        Trunc.getNode()->dump(&DAG);
655        dbgs() << '\n');
656  WorkListRemover DeadNodes(*this);
657  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc, &DeadNodes);
658  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1),
659                                &DeadNodes);
660  removeFromWorkList(Load);
661  DAG.DeleteNode(Load);
662  AddToWorkList(Trunc.getNode());
663}
664
665SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
666  Replace = false;
667  DebugLoc dl = Op.getDebugLoc();
668  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
669    EVT MemVT = LD->getMemoryVT();
670    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
671      ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
672                                                  : ISD::EXTLOAD)
673      : LD->getExtensionType();
674    Replace = true;
675    return DAG.getExtLoad(ExtType, PVT, dl,
676                          LD->getChain(), LD->getBasePtr(),
677                          LD->getPointerInfo(),
678                          MemVT, LD->isVolatile(),
679                          LD->isNonTemporal(), LD->getAlignment());
680  }
681
682  unsigned Opc = Op.getOpcode();
683  switch (Opc) {
684  default: break;
685  case ISD::AssertSext:
686    return DAG.getNode(ISD::AssertSext, dl, PVT,
687                       SExtPromoteOperand(Op.getOperand(0), PVT),
688                       Op.getOperand(1));
689  case ISD::AssertZext:
690    return DAG.getNode(ISD::AssertZext, dl, PVT,
691                       ZExtPromoteOperand(Op.getOperand(0), PVT),
692                       Op.getOperand(1));
693  case ISD::Constant: {
694    unsigned ExtOpc =
695      Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
696    return DAG.getNode(ExtOpc, dl, PVT, Op);
697  }
698  }
699
700  if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
701    return SDValue();
702  return DAG.getNode(ISD::ANY_EXTEND, dl, PVT, Op);
703}
704
705SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
706  if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
707    return SDValue();
708  EVT OldVT = Op.getValueType();
709  DebugLoc dl = Op.getDebugLoc();
710  bool Replace = false;
711  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
712  if (NewOp.getNode() == 0)
713    return SDValue();
714  AddToWorkList(NewOp.getNode());
715
716  if (Replace)
717    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
718  return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NewOp.getValueType(), NewOp,
719                     DAG.getValueType(OldVT));
720}
721
722SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
723  EVT OldVT = Op.getValueType();
724  DebugLoc dl = Op.getDebugLoc();
725  bool Replace = false;
726  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
727  if (NewOp.getNode() == 0)
728    return SDValue();
729  AddToWorkList(NewOp.getNode());
730
731  if (Replace)
732    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
733  return DAG.getZeroExtendInReg(NewOp, dl, OldVT);
734}
735
736/// PromoteIntBinOp - Promote the specified integer binary operation if the
737/// target indicates it is beneficial. e.g. On x86, it's usually better to
738/// promote i16 operations to i32 since i16 instructions are longer.
739SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
740  if (!LegalOperations)
741    return SDValue();
742
743  EVT VT = Op.getValueType();
744  if (VT.isVector() || !VT.isInteger())
745    return SDValue();
746
747  // If operation type is 'undesirable', e.g. i16 on x86, consider
748  // promoting it.
749  unsigned Opc = Op.getOpcode();
750  if (TLI.isTypeDesirableForOp(Opc, VT))
751    return SDValue();
752
753  EVT PVT = VT;
754  // Consult target whether it is a good idea to promote this operation and
755  // what's the right type to promote it to.
756  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
757    assert(PVT != VT && "Don't know what type to promote to!");
758
759    bool Replace0 = false;
760    SDValue N0 = Op.getOperand(0);
761    SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
762    if (NN0.getNode() == 0)
763      return SDValue();
764
765    bool Replace1 = false;
766    SDValue N1 = Op.getOperand(1);
767    SDValue NN1;
768    if (N0 == N1)
769      NN1 = NN0;
770    else {
771      NN1 = PromoteOperand(N1, PVT, Replace1);
772      if (NN1.getNode() == 0)
773        return SDValue();
774    }
775
776    AddToWorkList(NN0.getNode());
777    if (NN1.getNode())
778      AddToWorkList(NN1.getNode());
779
780    if (Replace0)
781      ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
782    if (Replace1)
783      ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());
784
785    DEBUG(dbgs() << "\nPromoting ";
786          Op.getNode()->dump(&DAG));
787    DebugLoc dl = Op.getDebugLoc();
788    return DAG.getNode(ISD::TRUNCATE, dl, VT,
789                       DAG.getNode(Opc, dl, PVT, NN0, NN1));
790  }
791  return SDValue();
792}
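// Illustrative sketch: on a target where i16 is undesirable but i32 is fine
// (x86 being the motivating case), an (add i16 a, b) reaching this point becomes
//   (truncate i16 (add i32 (any_extend a), (any_extend b)))
// and any loads among the operands are widened in place through
// ReplaceLoadWithPromotedLoad instead of being wrapped in an extend.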
793
794/// PromoteIntShiftOp - Promote the specified integer shift operation if the
795/// target indicates it is beneficial. e.g. On x86, it's usually better to
796/// promote i16 operations to i32 since i16 instructions are longer.
797SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
798  if (!LegalOperations)
799    return SDValue();
800
801  EVT VT = Op.getValueType();
802  if (VT.isVector() || !VT.isInteger())
803    return SDValue();
804
805  // If operation type is 'undesirable', e.g. i16 on x86, consider
806  // promoting it.
807  unsigned Opc = Op.getOpcode();
808  if (TLI.isTypeDesirableForOp(Opc, VT))
809    return SDValue();
810
811  EVT PVT = VT;
812  // Consult target whether it is a good idea to promote this operation and
813  // what's the right type to promote it to.
814  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
815    assert(PVT != VT && "Don't know what type to promote to!");
816
817    bool Replace = false;
818    SDValue N0 = Op.getOperand(0);
819    if (Opc == ISD::SRA)
820      N0 = SExtPromoteOperand(Op.getOperand(0), PVT);
821    else if (Opc == ISD::SRL)
822      N0 = ZExtPromoteOperand(Op.getOperand(0), PVT);
823    else
824      N0 = PromoteOperand(N0, PVT, Replace);
825    if (N0.getNode() == 0)
826      return SDValue();
827
828    AddToWorkList(N0.getNode());
829    if (Replace)
830      ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());
831
832    DEBUG(dbgs() << "\nPromoting ";
833          Op.getNode()->dump(&DAG));
834    DebugLoc dl = Op.getDebugLoc();
835    return DAG.getNode(ISD::TRUNCATE, dl, VT,
836                       DAG.getNode(Opc, dl, PVT, N0, Op.getOperand(1)));
837  }
838  return SDValue();
839}
840
841SDValue DAGCombiner::PromoteExtend(SDValue Op) {
842  if (!LegalOperations)
843    return SDValue();
844
845  EVT VT = Op.getValueType();
846  if (VT.isVector() || !VT.isInteger())
847    return SDValue();
848
849  // If operation type is 'undesirable', e.g. i16 on x86, consider
850  // promoting it.
851  unsigned Opc = Op.getOpcode();
852  if (TLI.isTypeDesirableForOp(Opc, VT))
853    return SDValue();
854
855  EVT PVT = VT;
856  // Consult target whether it is a good idea to promote this operation and
857  // what's the right type to promote it to.
858  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
859    assert(PVT != VT && "Don't know what type to promote to!");
860    // fold (aext (aext x)) -> (aext x)
861    // fold (aext (zext x)) -> (zext x)
862    // fold (aext (sext x)) -> (sext x)
863    DEBUG(dbgs() << "\nPromoting ";
864          Op.getNode()->dump(&DAG));
865    return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), VT, Op.getOperand(0));
866  }
867  return SDValue();
868}
869
870bool DAGCombiner::PromoteLoad(SDValue Op) {
871  if (!LegalOperations)
872    return false;
873
874  EVT VT = Op.getValueType();
875  if (VT.isVector() || !VT.isInteger())
876    return false;
877
878  // If operation type is 'undesirable', e.g. i16 on x86, consider
879  // promoting it.
880  unsigned Opc = Op.getOpcode();
881  if (TLI.isTypeDesirableForOp(Opc, VT))
882    return false;
883
884  EVT PVT = VT;
885  // Consult target whether it is a good idea to promote this operation and
886  // what's the right type to promote it to.
887  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
888    assert(PVT != VT && "Don't know what type to promote to!");
889
890    DebugLoc dl = Op.getDebugLoc();
891    SDNode *N = Op.getNode();
892    LoadSDNode *LD = cast<LoadSDNode>(N);
893    EVT MemVT = LD->getMemoryVT();
894    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
895      ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
896                                                  : ISD::EXTLOAD)
897      : LD->getExtensionType();
898    SDValue NewLD = DAG.getExtLoad(ExtType, PVT, dl,
899                                   LD->getChain(), LD->getBasePtr(),
900                                   LD->getPointerInfo(),
901                                   MemVT, LD->isVolatile(),
902                                   LD->isNonTemporal(), LD->getAlignment());
903    SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, VT, NewLD);
904
905    DEBUG(dbgs() << "\nPromoting ";
906          N->dump(&DAG);
907          dbgs() << "\nTo: ";
908          Result.getNode()->dump(&DAG);
909          dbgs() << '\n');
910    WorkListRemover DeadNodes(*this);
911    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result, &DeadNodes);
912    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1), &DeadNodes);
913    removeFromWorkList(N);
914    DAG.DeleteNode(N);
915    AddToWorkList(Result.getNode());
916    return true;
917  }
918  return false;
919}
920
921
922//===----------------------------------------------------------------------===//
923//  Main DAG Combiner implementation
924//===----------------------------------------------------------------------===//
925
926void DAGCombiner::Run(CombineLevel AtLevel) {
927  // Set the instance variables so that the various visit routines may use them.
928  Level = AtLevel;
929  LegalOperations = Level >= NoIllegalOperations;
930  LegalTypes = Level >= NoIllegalTypes;
931
932  // Add all the dag nodes to the worklist.
933  WorkList.reserve(DAG.allnodes_size());
934  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
935       E = DAG.allnodes_end(); I != E; ++I)
936    WorkList.push_back(I);
937
938  // Create a dummy node (which is not added to allnodes) that adds a reference
939  // to the root node, preventing it from being deleted and tracking any
940  // changes to the root.
941  HandleSDNode Dummy(DAG.getRoot());
942
943  // The root of the dag may dangle to deleted nodes until the dag combiner is
944  // done.  Set it to null to avoid confusion.
945  DAG.setRoot(SDValue());
946
947  // While the worklist isn't empty, inspect the node at the end of it and
948  // try to combine it.
949  while (!WorkList.empty()) {
950    SDNode *N = WorkList.back();
951    WorkList.pop_back();
952
953    // If N has no uses, it is dead.  Make sure to revisit all N's operands once
954    // N is deleted from the DAG, since they too may now be dead or may have a
955    // reduced number of uses, allowing other xforms.
956    if (N->use_empty() && N != &Dummy) {
957      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
958        AddToWorkList(N->getOperand(i).getNode());
959
960      DAG.DeleteNode(N);
961      continue;
962    }
963
964    SDValue RV = combine(N);
965
966    if (RV.getNode() == 0)
967      continue;
968
969    ++NodesCombined;
970
971    // If we get back the same node we passed in, rather than a new node or
972    // zero, we know that the node must have defined multiple values and
973    // CombineTo was used.  Since CombineTo takes care of the worklist
974    // mechanics for us, we have no work to do in this case.
975    if (RV.getNode() == N)
976      continue;
977
978    assert(N->getOpcode() != ISD::DELETED_NODE &&
979           RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
980           "Node was deleted but visit returned new node!");
981
982    DEBUG(dbgs() << "\nReplacing.3 ";
983          N->dump(&DAG);
984          dbgs() << "\nWith: ";
985          RV.getNode()->dump(&DAG);
986          dbgs() << '\n');
987    WorkListRemover DeadNodes(*this);
988    if (N->getNumValues() == RV.getNode()->getNumValues())
989      DAG.ReplaceAllUsesWith(N, RV.getNode(), &DeadNodes);
990    else {
991      assert(N->getValueType(0) == RV.getValueType() &&
992             N->getNumValues() == 1 && "Type mismatch");
993      SDValue OpV = RV;
994      DAG.ReplaceAllUsesWith(N, &OpV, &DeadNodes);
995    }
996
997    // Push the new node and any users onto the worklist
998    AddToWorkList(RV.getNode());
999    AddUsersToWorkList(RV.getNode());
1000
1001    // Add the operands of the old node to the worklist, in case this node was
1002    // their last remaining user.  They may become dead after this node is
1003    // deleted.
1004    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
1005      AddToWorkList(N->getOperand(i).getNode());
1006
1007    // Finally, if the node is now dead, remove it from the graph.  The node
1008    // may not be dead if the replacement process recursively simplified to
1009    // something else needing this node.
1010    if (N->use_empty()) {
1011      // Nodes can be reintroduced into the worklist.  Make sure we do not
1012      // process a node that has been replaced.
1013      removeFromWorkList(N);
1014
1015      // Finally, since the node is now dead, remove it from the graph.
1016      DAG.DeleteNode(N);
1017    }
1018  }
1019
1020  // If the root changed (e.g. it was a dead load), update the root.
1021  DAG.setRoot(Dummy.getValue());
1022}
1023
1024SDValue DAGCombiner::visit(SDNode *N) {
1025  switch (N->getOpcode()) {
1026  default: break;
1027  case ISD::TokenFactor:        return visitTokenFactor(N);
1028  case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
1029  case ISD::ADD:                return visitADD(N);
1030  case ISD::SUB:                return visitSUB(N);
1031  case ISD::ADDC:               return visitADDC(N);
1032  case ISD::ADDE:               return visitADDE(N);
1033  case ISD::MUL:                return visitMUL(N);
1034  case ISD::SDIV:               return visitSDIV(N);
1035  case ISD::UDIV:               return visitUDIV(N);
1036  case ISD::SREM:               return visitSREM(N);
1037  case ISD::UREM:               return visitUREM(N);
1038  case ISD::MULHU:              return visitMULHU(N);
1039  case ISD::MULHS:              return visitMULHS(N);
1040  case ISD::SMUL_LOHI:          return visitSMUL_LOHI(N);
1041  case ISD::UMUL_LOHI:          return visitUMUL_LOHI(N);
1042  case ISD::SDIVREM:            return visitSDIVREM(N);
1043  case ISD::UDIVREM:            return visitUDIVREM(N);
1044  case ISD::AND:                return visitAND(N);
1045  case ISD::OR:                 return visitOR(N);
1046  case ISD::XOR:                return visitXOR(N);
1047  case ISD::SHL:                return visitSHL(N);
1048  case ISD::SRA:                return visitSRA(N);
1049  case ISD::SRL:                return visitSRL(N);
1050  case ISD::CTLZ:               return visitCTLZ(N);
1051  case ISD::CTTZ:               return visitCTTZ(N);
1052  case ISD::CTPOP:              return visitCTPOP(N);
1053  case ISD::SELECT:             return visitSELECT(N);
1054  case ISD::SELECT_CC:          return visitSELECT_CC(N);
1055  case ISD::SETCC:              return visitSETCC(N);
1056  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
1057  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
1058  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
1059  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
1060  case ISD::TRUNCATE:           return visitTRUNCATE(N);
1061  case ISD::BITCAST:            return visitBITCAST(N);
1062  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
1063  case ISD::FADD:               return visitFADD(N);
1064  case ISD::FSUB:               return visitFSUB(N);
1065  case ISD::FMUL:               return visitFMUL(N);
1066  case ISD::FDIV:               return visitFDIV(N);
1067  case ISD::FREM:               return visitFREM(N);
1068  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
1069  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
1070  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
1071  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
1072  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
1073  case ISD::FP_ROUND:           return visitFP_ROUND(N);
1074  case ISD::FP_ROUND_INREG:     return visitFP_ROUND_INREG(N);
1075  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
1076  case ISD::FNEG:               return visitFNEG(N);
1077  case ISD::FABS:               return visitFABS(N);
1078  case ISD::BRCOND:             return visitBRCOND(N);
1079  case ISD::BR_CC:              return visitBR_CC(N);
1080  case ISD::LOAD:               return visitLOAD(N);
1081  case ISD::STORE:              return visitSTORE(N);
1082  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
1083  case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
1084  case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
1085  case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
1086  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
1087  case ISD::MEMBARRIER:         return visitMEMBARRIER(N);
1088  }
1089  return SDValue();
1090}
1091
1092SDValue DAGCombiner::combine(SDNode *N) {
1093  SDValue RV = visit(N);
1094
1095  // If nothing happened, try a target-specific DAG combine.
1096  if (RV.getNode() == 0) {
1097    assert(N->getOpcode() != ISD::DELETED_NODE &&
1098           "Node was deleted but visit returned NULL!");
1099
1100    if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
1101        TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {
1102
1103      // Expose the DAG combiner to the target combiner impls.
1104      TargetLowering::DAGCombinerInfo
1105        DagCombineInfo(DAG, !LegalTypes, !LegalOperations, false, this);
1106
1107      RV = TLI.PerformDAGCombine(N, DagCombineInfo);
1108    }
1109  }
1110
1111  // If still nothing happened, try promoting the operation.
1112  if (RV.getNode() == 0) {
1113    switch (N->getOpcode()) {
1114    default: break;
1115    case ISD::ADD:
1116    case ISD::SUB:
1117    case ISD::MUL:
1118    case ISD::AND:
1119    case ISD::OR:
1120    case ISD::XOR:
1121      RV = PromoteIntBinOp(SDValue(N, 0));
1122      break;
1123    case ISD::SHL:
1124    case ISD::SRA:
1125    case ISD::SRL:
1126      RV = PromoteIntShiftOp(SDValue(N, 0));
1127      break;
1128    case ISD::SIGN_EXTEND:
1129    case ISD::ZERO_EXTEND:
1130    case ISD::ANY_EXTEND:
1131      RV = PromoteExtend(SDValue(N, 0));
1132      break;
1133    case ISD::LOAD:
1134      if (PromoteLoad(SDValue(N, 0)))
1135        RV = SDValue(N, 0);
1136      break;
1137    }
1138  }
1139
1140  // If N is a commutative binary node, try commuting it to enable more
1141  // sdisel CSE.
1142  if (RV.getNode() == 0 &&
1143      SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
1144      N->getNumValues() == 1) {
1145    SDValue N0 = N->getOperand(0);
1146    SDValue N1 = N->getOperand(1);
1147
1148    // Constant operands are canonicalized to RHS.
1149    if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
1150      SDValue Ops[] = { N1, N0 };
1151      SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(),
1152                                            Ops, 2);
1153      if (CSENode)
1154        return SDValue(CSENode, 0);
1155    }
1156  }
1157
1158  return RV;
1159}
1160
1161/// getInputChainForNode - Given a node, return its input chain if it has one,
1162/// otherwise return a null SDValue.
1163static SDValue getInputChainForNode(SDNode *N) {
1164  if (unsigned NumOps = N->getNumOperands()) {
1165    if (N->getOperand(0).getValueType() == MVT::Other)
1166      return N->getOperand(0);
1167    else if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
1168      return N->getOperand(NumOps-1);
1169    for (unsigned i = 1; i < NumOps-1; ++i)
1170      if (N->getOperand(i).getValueType() == MVT::Other)
1171        return N->getOperand(i);
1172  }
1173  return SDValue();
1174}
1175
1176SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
1177  // If N has two operands and one of them has an input chain equal to the
1178  // other operand, that other chain is redundant.
1179  if (N->getNumOperands() == 2) {
1180    if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
1181      return N->getOperand(0);
1182    if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
1183      return N->getOperand(1);
1184  }
1185
1186  SmallVector<SDNode *, 8> TFs;     // List of token factors to visit.
1187  SmallVector<SDValue, 8> Ops;    // Ops for replacing token factor.
1188  SmallPtrSet<SDNode*, 16> SeenOps;
1189  bool Changed = false;             // If we should replace this token factor.
1190
1191  // Start out with this token factor.
1192  TFs.push_back(N);
1193
1194  // Iterate through token factors.  The TFs list grows as new token factors are
1195  // encountered.
1196  for (unsigned i = 0; i < TFs.size(); ++i) {
1197    SDNode *TF = TFs[i];
1198
1199    // Check each of the operands.
1200    for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) {
1201      SDValue Op = TF->getOperand(i);
1202
1203      switch (Op.getOpcode()) {
1204      case ISD::EntryToken:
1205        // Entry tokens don't need to be added to the list. They are
1206        // redundant.
1207        Changed = true;
1208        break;
1209
1210      case ISD::TokenFactor:
1211        if (Op.hasOneUse() &&
1212            std::find(TFs.begin(), TFs.end(), Op.getNode()) == TFs.end()) {
1213          // Queue up for processing.
1214          TFs.push_back(Op.getNode());
1215          // Clean up in case the token factor is removed.
1216          AddToWorkList(Op.getNode());
1217          Changed = true;
1218          break;
1219        }
1220        // Fall thru
1221
1222      default:
1223        // Only add if it isn't already in the list.
1224        if (SeenOps.insert(Op.getNode()))
1225          Ops.push_back(Op);
1226        else
1227          Changed = true;
1228        break;
1229      }
1230    }
1231  }
1232
1233  SDValue Result;
1234
1235  // If we've changed things around, replace the token factor.
1236  if (Changed) {
1237    if (Ops.empty()) {
1238      // The entry token is the only possible outcome.
1239      Result = DAG.getEntryNode();
1240    } else {
1241      // New and improved token factor.
1242      Result = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
1243                           MVT::Other, &Ops[0], Ops.size());
1244    }
1245
1246    // Don't add users to work list.
1247    return CombineTo(N, Result, false);
1248  }
1249
1250  return Result;
1251}
1252
1253/// MERGE_VALUES can always be eliminated.
1254SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
1255  WorkListRemover DeadNodes(*this);
1256  // Replacing results may cause a different MERGE_VALUES to suddenly
1257  // be CSE'd with N, and carry its uses with it. Iterate until no
1258  // uses remain, to ensure that the node can be safely deleted.
1259  do {
1260    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
1261      DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i),
1262                                    &DeadNodes);
1263  } while (!N->use_empty());
1264  removeFromWorkList(N);
1265  DAG.DeleteNode(N);
1266  return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1267}
1268
1269static
1270SDValue combineShlAddConstant(DebugLoc DL, SDValue N0, SDValue N1,
1271                              SelectionDAG &DAG) {
1272  EVT VT = N0.getValueType();
1273  SDValue N00 = N0.getOperand(0);
1274  SDValue N01 = N0.getOperand(1);
1275  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01);
1276
1277  if (N01C && N00.getOpcode() == ISD::ADD && N00.getNode()->hasOneUse() &&
1278      isa<ConstantSDNode>(N00.getOperand(1))) {
1279    // fold (add (shl (add x, c1), c2), y) -> (add (add (shl x, c2), c1<<c2), y)
1280    N0 = DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT,
1281                     DAG.getNode(ISD::SHL, N00.getDebugLoc(), VT,
1282                                 N00.getOperand(0), N01),
1283                     DAG.getNode(ISD::SHL, N01.getDebugLoc(), VT,
1284                                 N00.getOperand(1), N01));
1285    return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
1286  }
1287
1288  return SDValue();
1289}
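// Concrete instance of the fold above, with illustrative constants c1 = 3 and
// c2 = 2:
//   (add (shl (add x, 3), 2), y) -> (add (add (shl x, 2), (shl 3, 2)), y)
// i.e. the inner constant is pre-shifted (here to 12) and the outer add operand
// y is left untouched.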
1290
1291SDValue DAGCombiner::visitADD(SDNode *N) {
1292  SDValue N0 = N->getOperand(0);
1293  SDValue N1 = N->getOperand(1);
1294  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1295  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1296  EVT VT = N0.getValueType();
1297
1298  // fold vector ops
1299  if (VT.isVector()) {
1300    SDValue FoldedVOp = SimplifyVBinOp(N);
1301    if (FoldedVOp.getNode()) return FoldedVOp;
1302  }
1303
1304  // fold (add x, undef) -> undef
1305  if (N0.getOpcode() == ISD::UNDEF)
1306    return N0;
1307  if (N1.getOpcode() == ISD::UNDEF)
1308    return N1;
1309  // fold (add c1, c2) -> c1+c2
1310  if (N0C && N1C)
1311    return DAG.FoldConstantArithmetic(ISD::ADD, VT, N0C, N1C);
1312  // canonicalize constant to RHS
1313  if (N0C && !N1C)
1314    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0);
1315  // fold (add x, 0) -> x
1316  if (N1C && N1C->isNullValue())
1317    return N0;
1318  // fold (add Sym, c) -> Sym+c
1319  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
1320    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C &&
1321        GA->getOpcode() == ISD::GlobalAddress)
1322      return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT,
1323                                  GA->getOffset() +
1324                                    (uint64_t)N1C->getSExtValue());
1325  // fold ((c1-A)+c2) -> (c1+c2)-A
1326  if (N1C && N0.getOpcode() == ISD::SUB)
1327    if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0)))
1328      return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1329                         DAG.getConstant(N1C->getAPIntValue()+
1330                                         N0C->getAPIntValue(), VT),
1331                         N0.getOperand(1));
1332  // reassociate add
1333  SDValue RADD = ReassociateOps(ISD::ADD, N->getDebugLoc(), N0, N1);
1334  if (RADD.getNode() != 0)
1335    return RADD;
1336  // fold ((0-A) + B) -> B-A
1337  if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
1338      cast<ConstantSDNode>(N0.getOperand(0))->isNullValue())
1339    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1, N0.getOperand(1));
1340  // fold (A + (0-B)) -> A-B
1341  if (N1.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N1.getOperand(0)) &&
1342      cast<ConstantSDNode>(N1.getOperand(0))->isNullValue())
1343    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, N1.getOperand(1));
1344  // fold (A+(B-A)) -> B
1345  if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
1346    return N1.getOperand(0);
1347  // fold ((B-A)+A) -> B
1348  if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
1349    return N0.getOperand(0);
1350  // fold (A+(B-(A+C))) to (B-C)
1351  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1352      N0 == N1.getOperand(1).getOperand(0))
1353    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0),
1354                       N1.getOperand(1).getOperand(1));
1355  // fold (A+(B-(C+A))) to (B-C)
1356  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1357      N0 == N1.getOperand(1).getOperand(1))
1358    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0),
1359                       N1.getOperand(1).getOperand(0));
1360  // fold (A+((B-A)+or-C)) to (B+or-C)
1361  if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
1362      N1.getOperand(0).getOpcode() == ISD::SUB &&
1363      N0 == N1.getOperand(0).getOperand(1))
1364    return DAG.getNode(N1.getOpcode(), N->getDebugLoc(), VT,
1365                       N1.getOperand(0).getOperand(0), N1.getOperand(1));
1366
1367  // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
1368  if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
1369    SDValue N00 = N0.getOperand(0);
1370    SDValue N01 = N0.getOperand(1);
1371    SDValue N10 = N1.getOperand(0);
1372    SDValue N11 = N1.getOperand(1);
1373
1374    if (isa<ConstantSDNode>(N00) || isa<ConstantSDNode>(N10))
1375      return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1376                         DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT, N00, N10),
1377                         DAG.getNode(ISD::ADD, N1.getDebugLoc(), VT, N01, N11));
1378  }
1379
1380  if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0)))
1381    return SDValue(N, 0);
1382
1383  // fold (a+b) -> (a|b) iff a and b share no bits.
1384  if (VT.isInteger() && !VT.isVector()) {
1385    APInt LHSZero, LHSOne;
1386    APInt RHSZero, RHSOne;
1387    APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
1388    DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
1389
1390    if (LHSZero.getBoolValue()) {
1391      DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
1392
1393      // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
1394      // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
1395      if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
1396          (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
1397        return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1);
1398    }
1399  }
1400
1401  // fold (add (shl (add x, c1), c2), y) -> (add (add (shl x, c2), c1<<c2), y)
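  // For example, ((x + 3) << 2) + y becomes ((x << 2) + 12) + y, exposing the
  // shifted constant to further constant folding.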
1402  if (N0.getOpcode() == ISD::SHL && N0.getNode()->hasOneUse()) {
1403    SDValue Result = combineShlAddConstant(N->getDebugLoc(), N0, N1, DAG);
1404    if (Result.getNode()) return Result;
1405  }
1406  if (N1.getOpcode() == ISD::SHL && N1.getNode()->hasOneUse()) {
1407    SDValue Result = combineShlAddConstant(N->getDebugLoc(), N1, N0, DAG);
1408    if (Result.getNode()) return Result;
1409  }
1410
1411  // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
1412  if (N1.getOpcode() == ISD::SHL &&
1413      N1.getOperand(0).getOpcode() == ISD::SUB)
1414    if (ConstantSDNode *C =
1415          dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(0)))
1416      if (C->getAPIntValue() == 0)
1417        return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0,
1418                           DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
1419                                       N1.getOperand(0).getOperand(1),
1420                                       N1.getOperand(1)));
1421  if (N0.getOpcode() == ISD::SHL &&
1422      N0.getOperand(0).getOpcode() == ISD::SUB)
1423    if (ConstantSDNode *C =
1424          dyn_cast<ConstantSDNode>(N0.getOperand(0).getOperand(0)))
1425      if (C->getAPIntValue() == 0)
1426        return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1,
1427                           DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
1428                                       N0.getOperand(0).getOperand(1),
1429                                       N0.getOperand(1)));
1430
1431  if (N1.getOpcode() == ISD::AND) {
1432    SDValue AndOp0 = N1.getOperand(0);
1433    ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1));
1434    unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
1435    unsigned DestBits = VT.getScalarType().getSizeInBits();
1436
1437    // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
1438    // and similar xforms where the inner op is either ~0 or 0.
1439    if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) {
1440      DebugLoc DL = N->getDebugLoc();
1441      return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0);
1442    }
1443  }
1444
1445  // add (sext i1), X -> sub X, (zext i1)
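  // A sign-extended i1 is 0 or -1 while a zero-extended i1 is 0 or 1, so
  // adding the former is the same as subtracting the latter.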
1446  if (N0.getOpcode() == ISD::SIGN_EXTEND &&
1447      N0.getOperand(0).getValueType() == MVT::i1 &&
1448      !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) {
1449    DebugLoc DL = N->getDebugLoc();
1450    SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
1451    return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
1452  }
1453
1454  return SDValue();
1455}
1456
1457SDValue DAGCombiner::visitADDC(SDNode *N) {
1458  SDValue N0 = N->getOperand(0);
1459  SDValue N1 = N->getOperand(1);
1460  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1461  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1462  EVT VT = N0.getValueType();
1463
1464  // If the flag result is dead, turn this into an ADD.
1465  if (N->hasNUsesOfValue(0, 1))
1466    return CombineTo(N, DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0),
1467                     DAG.getNode(ISD::CARRY_FALSE,
1468                                 N->getDebugLoc(), MVT::Glue));
1469
1470  // canonicalize constant to RHS.
1471  if (N0C && !N1C)
1472    return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N1, N0);
1473
1474  // fold (addc x, 0) -> x + no carry out
1475  if (N1C && N1C->isNullValue())
1476    return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
1477                                        N->getDebugLoc(), MVT::Glue));
1478
1479  // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
1480  APInt LHSZero, LHSOne;
1481  APInt RHSZero, RHSOne;
1482  APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
1483  DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
1484
1485  if (LHSZero.getBoolValue()) {
1486    DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
1487
1488    // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
1489    // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
1490    if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
1491        (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
1492      return CombineTo(N, DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1),
1493                       DAG.getNode(ISD::CARRY_FALSE,
1494                                   N->getDebugLoc(), MVT::Glue));
1495  }
1496
1497  return SDValue();
1498}
1499
1500SDValue DAGCombiner::visitADDE(SDNode *N) {
1501  SDValue N0 = N->getOperand(0);
1502  SDValue N1 = N->getOperand(1);
1503  SDValue CarryIn = N->getOperand(2);
1504  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1505  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1506
1507  // canonicalize constant to RHS
1508  if (N0C && !N1C)
1509    return DAG.getNode(ISD::ADDE, N->getDebugLoc(), N->getVTList(),
1510                       N1, N0, CarryIn);
1511
1512  // fold (adde x, y, false) -> (addc x, y)
1513  if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
1514    return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N1, N0);
1515
1516  return SDValue();
1517}
1518
1519SDValue DAGCombiner::visitSUB(SDNode *N) {
1520  SDValue N0 = N->getOperand(0);
1521  SDValue N1 = N->getOperand(1);
1522  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1523  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1524  EVT VT = N0.getValueType();
1525
1526  // fold vector ops
1527  if (VT.isVector()) {
1528    SDValue FoldedVOp = SimplifyVBinOp(N);
1529    if (FoldedVOp.getNode()) return FoldedVOp;
1530  }
1531
1532  // fold (sub x, x) -> 0
1533  if (N0 == N1)
1534    return DAG.getConstant(0, N->getValueType(0));
1535  // fold (sub c1, c2) -> c1-c2
1536  if (N0C && N1C)
1537    return DAG.FoldConstantArithmetic(ISD::SUB, VT, N0C, N1C);
1538  // fold (sub x, c) -> (add x, -c)
1539  if (N1C)
1540    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0,
1541                       DAG.getConstant(-N1C->getAPIntValue(), VT));
1542  // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
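  // In two's complement, -1 - x is exactly ~x, so an XOR with all-ones bits
  // replaces the subtract.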
1543  if (N0C && N0C->isAllOnesValue())
1544    return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0);
1545  // fold (A+B)-A -> B
1546  if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
1547    return N0.getOperand(1);
1548  // fold (A+B)-B -> A
1549  if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
1550    return N0.getOperand(0);
1551  // fold ((A+(B+or-C))-B) -> A+or-C
1552  if (N0.getOpcode() == ISD::ADD &&
1553      (N0.getOperand(1).getOpcode() == ISD::SUB ||
1554       N0.getOperand(1).getOpcode() == ISD::ADD) &&
1555      N0.getOperand(1).getOperand(0) == N1)
1556    return DAG.getNode(N0.getOperand(1).getOpcode(), N->getDebugLoc(), VT,
1557                       N0.getOperand(0), N0.getOperand(1).getOperand(1));
1558  // fold ((A+(C+B))-B) -> A+C
1559  if (N0.getOpcode() == ISD::ADD &&
1560      N0.getOperand(1).getOpcode() == ISD::ADD &&
1561      N0.getOperand(1).getOperand(1) == N1)
1562    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT,
1563                       N0.getOperand(0), N0.getOperand(1).getOperand(0));
1564  // fold ((A-(B-C))-C) -> A-B
1565  if (N0.getOpcode() == ISD::SUB &&
1566      N0.getOperand(1).getOpcode() == ISD::SUB &&
1567      N0.getOperand(1).getOperand(1) == N1)
1568    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1569                       N0.getOperand(0), N0.getOperand(1).getOperand(0));
1570
1571  // If either operand of a sub is undef, the result is undef
1572  if (N0.getOpcode() == ISD::UNDEF)
1573    return N0;
1574  if (N1.getOpcode() == ISD::UNDEF)
1575    return N1;
1576
1577  // If the relocation model supports it, consider symbol offsets.
1578  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
1579    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
1580      // fold (sub Sym, c) -> Sym-c
1581      if (N1C && GA->getOpcode() == ISD::GlobalAddress)
1582        return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT,
1583                                    GA->getOffset() -
1584                                      (uint64_t)N1C->getSExtValue());
1585      // fold (sub Sym+c1, Sym+c2) -> c1-c2
1586      if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
1587        if (GA->getGlobal() == GB->getGlobal())
1588          return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
1589                                 VT);
1590    }
1591
1592  return SDValue();
1593}
1594
1595SDValue DAGCombiner::visitMUL(SDNode *N) {
1596  SDValue N0 = N->getOperand(0);
1597  SDValue N1 = N->getOperand(1);
1598  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1599  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1600  EVT VT = N0.getValueType();
1601
1602  // fold vector ops
1603  if (VT.isVector()) {
1604    SDValue FoldedVOp = SimplifyVBinOp(N);
1605    if (FoldedVOp.getNode()) return FoldedVOp;
1606  }
1607
1608  // fold (mul x, undef) -> 0
1609  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1610    return DAG.getConstant(0, VT);
1611  // fold (mul c1, c2) -> c1*c2
1612  if (N0C && N1C)
1613    return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0C, N1C);
1614  // canonicalize constant to RHS
1615  if (N0C && !N1C)
1616    return DAG.getNode(ISD::MUL, N->getDebugLoc(), VT, N1, N0);
1617  // fold (mul x, 0) -> 0
1618  if (N1C && N1C->isNullValue())
1619    return N1;
1620  // fold (mul x, -1) -> 0-x
1621  if (N1C && N1C->isAllOnesValue())
1622    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1623                       DAG.getConstant(0, VT), N0);
1624  // fold (mul x, (1 << c)) -> x << c
1625  if (N1C && N1C->getAPIntValue().isPowerOf2())
1626    return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
1627                       DAG.getConstant(N1C->getAPIntValue().logBase2(),
1628                                       getShiftAmountTy()));
1629  // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
1630  if (N1C && (-N1C->getAPIntValue()).isPowerOf2()) {
1631    unsigned Log2Val = (-N1C->getAPIntValue()).logBase2();
1632    // FIXME: If the input is something that is easily negated (e.g. a
1633    // single-use add), we should put the negate there.
1634    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1635                       DAG.getConstant(0, VT),
1636                       DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
1637                            DAG.getConstant(Log2Val, getShiftAmountTy())));
1638  }
1639  // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
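  // Shifting X left by c1 multiplies it by 2^c1, so the whole expression is
  // X * (c2 << c1); e.g. (X << 2) * 5 becomes X * 20.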
1640  if (N1C && N0.getOpcode() == ISD::SHL &&
1641      isa<ConstantSDNode>(N0.getOperand(1))) {
1642    SDValue C3 = DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
1643                             N1, N0.getOperand(1));
1644    AddToWorkList(C3.getNode());
1645    return DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1646                       N0.getOperand(0), C3);
1647  }
1648
1649  // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
1650  // use.
1651  {
1652    SDValue Sh(0,0), Y(0,0);
1653    // Check for both (mul (shl X, C), Y)  and  (mul Y, (shl X, C)).
1654    if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
1655        N0.getNode()->hasOneUse()) {
1656      Sh = N0; Y = N1;
1657    } else if (N1.getOpcode() == ISD::SHL &&
1658               isa<ConstantSDNode>(N1.getOperand(1)) &&
1659               N1.getNode()->hasOneUse()) {
1660      Sh = N1; Y = N0;
1661    }
1662
1663    if (Sh.getNode()) {
1664      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1665                                Sh.getOperand(0), Y);
1666      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
1667                         Mul, Sh.getOperand(1));
1668    }
1669  }
1670
1671  // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
1672  if (N1C && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
1673      isa<ConstantSDNode>(N0.getOperand(1)))
1674    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT,
1675                       DAG.getNode(ISD::MUL, N0.getDebugLoc(), VT,
1676                                   N0.getOperand(0), N1),
1677                       DAG.getNode(ISD::MUL, N1.getDebugLoc(), VT,
1678                                   N0.getOperand(1), N1));
1679
1680  // reassociate mul
1681  SDValue RMUL = ReassociateOps(ISD::MUL, N->getDebugLoc(), N0, N1);
1682  if (RMUL.getNode() != 0)
1683    return RMUL;
1684
1685  return SDValue();
1686}
1687
1688SDValue DAGCombiner::visitSDIV(SDNode *N) {
1689  SDValue N0 = N->getOperand(0);
1690  SDValue N1 = N->getOperand(1);
1691  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1692  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1693  EVT VT = N->getValueType(0);
1694
1695  // fold vector ops
1696  if (VT.isVector()) {
1697    SDValue FoldedVOp = SimplifyVBinOp(N);
1698    if (FoldedVOp.getNode()) return FoldedVOp;
1699  }
1700
1701  // fold (sdiv c1, c2) -> c1/c2
1702  if (N0C && N1C && !N1C->isNullValue())
1703    return DAG.FoldConstantArithmetic(ISD::SDIV, VT, N0C, N1C);
1704  // fold (sdiv X, 1) -> X
1705  if (N1C && N1C->getSExtValue() == 1LL)
1706    return N0;
1707  // fold (sdiv X, -1) -> 0-X
1708  if (N1C && N1C->isAllOnesValue())
1709    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1710                       DAG.getConstant(0, VT), N0);
1711  // If we know the sign bits of both operands are zero, strength reduce to a
1712  // udiv instead.  Handles (X&15) /s 4 -> X&15 >> 2
1713  if (!VT.isVector()) {
1714    if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
1715      return DAG.getNode(ISD::UDIV, N->getDebugLoc(), N1.getValueType(),
1716                         N0, N1);
1717  }
1718  // fold (sdiv X, pow2) -> simple ops after legalize
1719  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap() &&
1720      (isPowerOf2_64(N1C->getSExtValue()) ||
1721       isPowerOf2_64(-N1C->getSExtValue()))) {
1722    // If dividing by powers of two is cheap, then don't perform the following
1723    // fold.
1724    if (TLI.isPow2DivCheap())
1725      return SDValue();
1726
1727    int64_t pow2 = N1C->getSExtValue();
1728    int64_t abs2 = pow2 > 0 ? pow2 : -pow2;
1729    unsigned lg2 = Log2_64(abs2);
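    // For example, dividing i32 X by 8 (lg2 == 3) emits:
    //   SGN = X >>s 31        (0 or -1)
    //   SRL = SGN >>u 29      (0, or 7 when X is negative)
    //   ADD = X + SRL         (bias negative values toward zero)
    //   SRA = ADD >>s 3       (the quotient, rounded toward zero)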
1730
1731    // Splat the sign bit into the register
1732    SDValue SGN = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
1733                              DAG.getConstant(VT.getSizeInBits()-1,
1734                                              getShiftAmountTy()));
1735    AddToWorkList(SGN.getNode());
1736
1737    // Add (N0 < 0) ? abs2 - 1 : 0;
1738    SDValue SRL = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, SGN,
1739                              DAG.getConstant(VT.getSizeInBits() - lg2,
1740                                              getShiftAmountTy()));
1741    SDValue ADD = DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, SRL);
1742    AddToWorkList(SRL.getNode());
1743    AddToWorkList(ADD.getNode());    // Divide by pow2
1744    SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, ADD,
1745                              DAG.getConstant(lg2, getShiftAmountTy()));
1746
1747    // If we're dividing by a positive value, we're done.  Otherwise, we must
1748    // negate the result.
1749    if (pow2 > 0)
1750      return SRA;
1751
1752    AddToWorkList(SRA.getNode());
1753    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
1754                       DAG.getConstant(0, VT), SRA);
1755  }
1756
1757  // If integer divide is expensive and we satisfy the requirements, emit an
1758  // alternate sequence.
1759  if (N1C && (N1C->getSExtValue() < -1 || N1C->getSExtValue() > 1) &&
1760      !TLI.isIntDivCheap()) {
1761    SDValue Op = BuildSDIV(N);
1762    if (Op.getNode()) return Op;
1763  }
1764
1765  // undef / X -> 0
1766  if (N0.getOpcode() == ISD::UNDEF)
1767    return DAG.getConstant(0, VT);
1768  // X / undef -> undef
1769  if (N1.getOpcode() == ISD::UNDEF)
1770    return N1;
1771
1772  return SDValue();
1773}
1774
1775SDValue DAGCombiner::visitUDIV(SDNode *N) {
1776  SDValue N0 = N->getOperand(0);
1777  SDValue N1 = N->getOperand(1);
1778  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
1779  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
1780  EVT VT = N->getValueType(0);
1781
1782  // fold vector ops
1783  if (VT.isVector()) {
1784    SDValue FoldedVOp = SimplifyVBinOp(N);
1785    if (FoldedVOp.getNode()) return FoldedVOp;
1786  }
1787
1788  // fold (udiv c1, c2) -> c1/c2
1789  if (N0C && N1C && !N1C->isNullValue())
1790    return DAG.FoldConstantArithmetic(ISD::UDIV, VT, N0C, N1C);
1791  // fold (udiv x, (1 << c)) -> x >>u c
1792  if (N1C && N1C->getAPIntValue().isPowerOf2())
1793    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
1794                       DAG.getConstant(N1C->getAPIntValue().logBase2(),
1795                                       getShiftAmountTy()));
1796  // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
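  // For example, x /u (4 << y) is x >>u (y + 2), since the divisor remains a
  // power of two for any valid shift amount y.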
1797  if (N1.getOpcode() == ISD::SHL) {
1798    if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
1799      if (SHC->getAPIntValue().isPowerOf2()) {
1800        EVT ADDVT = N1.getOperand(1).getValueType();
1801        SDValue Add = DAG.getNode(ISD::ADD, N->getDebugLoc(), ADDVT,
1802                                  N1.getOperand(1),
1803                                  DAG.getConstant(SHC->getAPIntValue()
1804                                                                  .logBase2(),
1805                                                  ADDVT));
1806        AddToWorkList(Add.getNode());
1807        return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, Add);
1808      }
1809    }
1810  }
1811  // fold (udiv x, c) -> alternate
1812  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) {
1813    SDValue Op = BuildUDIV(N);
1814    if (Op.getNode()) return Op;
1815  }
1816
1817  // undef / X -> 0
1818  if (N0.getOpcode() == ISD::UNDEF)
1819    return DAG.getConstant(0, VT);
1820  // X / undef -> undef
1821  if (N1.getOpcode() == ISD::UNDEF)
1822    return N1;
1823
1824  return SDValue();
1825}
1826
1827SDValue DAGCombiner::visitSREM(SDNode *N) {
1828  SDValue N0 = N->getOperand(0);
1829  SDValue N1 = N->getOperand(1);
1830  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1831  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1832  EVT VT = N->getValueType(0);
1833
1834  // fold (srem c1, c2) -> c1%c2
1835  if (N0C && N1C && !N1C->isNullValue())
1836    return DAG.FoldConstantArithmetic(ISD::SREM, VT, N0C, N1C);
1837  // If we know the sign bits of both operands are zero, strength reduce to a
1838  // urem instead.  Handles (X & 0x0FFFFFFF) %s 16 -> X&15
1839  if (!VT.isVector()) {
1840    if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
1841      return DAG.getNode(ISD::UREM, N->getDebugLoc(), VT, N0, N1);
1842  }
1843
1844  // If X/C can be simplified by the division-by-constant logic, lower
1845  // X%C to the equivalent of X-X/C*C.
1846  if (N1C && !N1C->isNullValue()) {
1847    SDValue Div = DAG.getNode(ISD::SDIV, N->getDebugLoc(), VT, N0, N1);
1848    AddToWorkList(Div.getNode());
1849    SDValue OptimizedDiv = combine(Div.getNode());
1850    if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
1851      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1852                                OptimizedDiv, N1);
1853      SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul);
1854      AddToWorkList(Mul.getNode());
1855      return Sub;
1856    }
1857  }
1858
1859  // undef % X -> 0
1860  if (N0.getOpcode() == ISD::UNDEF)
1861    return DAG.getConstant(0, VT);
1862  // X % undef -> undef
1863  if (N1.getOpcode() == ISD::UNDEF)
1864    return N1;
1865
1866  return SDValue();
1867}
1868
1869SDValue DAGCombiner::visitUREM(SDNode *N) {
1870  SDValue N0 = N->getOperand(0);
1871  SDValue N1 = N->getOperand(1);
1872  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1873  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1874  EVT VT = N->getValueType(0);
1875
1876  // fold (urem c1, c2) -> c1%c2
1877  if (N0C && N1C && !N1C->isNullValue())
1878    return DAG.FoldConstantArithmetic(ISD::UREM, VT, N0C, N1C);
1879  // fold (urem x, pow2) -> (and x, pow2-1)
1880  if (N1C && !N1C->isNullValue() && N1C->getAPIntValue().isPowerOf2())
1881    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0,
1882                       DAG.getConstant(N1C->getAPIntValue()-1,VT));
1883  // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
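  // For example, x %u (8 << y) becomes x & ((8 << y) - 1); the shifted divisor
  // is still a power of two, so masking with one less keeps exactly the
  // remainder bits.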
1884  if (N1.getOpcode() == ISD::SHL) {
1885    if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
1886      if (SHC->getAPIntValue().isPowerOf2()) {
1887        SDValue Add =
1888          DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1,
1889                 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()),
1890                                 VT));
1891        AddToWorkList(Add.getNode());
1892        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, Add);
1893      }
1894    }
1895  }
1896
1897  // If X/C can be simplified by the division-by-constant logic, lower
1898  // X%C to the equivalent of X-X/C*C.
1899  if (N1C && !N1C->isNullValue()) {
1900    SDValue Div = DAG.getNode(ISD::UDIV, N->getDebugLoc(), VT, N0, N1);
1901    AddToWorkList(Div.getNode());
1902    SDValue OptimizedDiv = combine(Div.getNode());
1903    if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
1904      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
1905                                OptimizedDiv, N1);
1906      SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul);
1907      AddToWorkList(Mul.getNode());
1908      return Sub;
1909    }
1910  }
1911
1912  // undef % X -> 0
1913  if (N0.getOpcode() == ISD::UNDEF)
1914    return DAG.getConstant(0, VT);
1915  // X % undef -> undef
1916  if (N1.getOpcode() == ISD::UNDEF)
1917    return N1;
1918
1919  return SDValue();
1920}
1921
1922SDValue DAGCombiner::visitMULHS(SDNode *N) {
1923  SDValue N0 = N->getOperand(0);
1924  SDValue N1 = N->getOperand(1);
1925  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1926  EVT VT = N->getValueType(0);
1927  DebugLoc DL = N->getDebugLoc();
1928
1929  // fold (mulhs x, 0) -> 0
1930  if (N1C && N1C->isNullValue())
1931    return N1;
1932  // fold (mulhs x, 1) -> (sra x, size(x)-1)
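  // The high half of a signed widening multiply by 1 is just the sign
  // extension of x, i.e. a copy of its sign bit in every position.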
1933  if (N1C && N1C->getAPIntValue() == 1)
1934    return DAG.getNode(ISD::SRA, N->getDebugLoc(), N0.getValueType(), N0,
1935                       DAG.getConstant(N0.getValueType().getSizeInBits() - 1,
1936                                       getShiftAmountTy()));
1937  // fold (mulhs x, undef) -> 0
1938  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1939    return DAG.getConstant(0, VT);
1940
1941  // If the type twice as wide is legal, transform the mulhs to a wider multiply
1942  // plus a shift.
1943  if (VT.isSimple() && !VT.isVector()) {
1944    MVT Simple = VT.getSimpleVT();
1945    unsigned SimpleSize = Simple.getSizeInBits();
1946    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
1947    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
1948      N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
1949      N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
1950      N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
1951      N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
1952                       DAG.getConstant(SimpleSize, getShiftAmountTy()));
1953      return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
1954    }
1955  }
1956
1957  return SDValue();
1958}
1959
1960SDValue DAGCombiner::visitMULHU(SDNode *N) {
1961  SDValue N0 = N->getOperand(0);
1962  SDValue N1 = N->getOperand(1);
1963  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1964  EVT VT = N->getValueType(0);
1965  DebugLoc DL = N->getDebugLoc();
1966
1967  // fold (mulhu x, 0) -> 0
1968  if (N1C && N1C->isNullValue())
1969    return N1;
1970  // fold (mulhu x, 1) -> 0
1971  if (N1C && N1C->getAPIntValue() == 1)
1972    return DAG.getConstant(0, N0.getValueType());
1973  // fold (mulhu x, undef) -> 0
1974  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
1975    return DAG.getConstant(0, VT);
1976
1977  // If the type twice as wide is legal, transform the mulhu to a wider multiply
1978  // plus a shift.
1979  if (VT.isSimple() && !VT.isVector()) {
1980    MVT Simple = VT.getSimpleVT();
1981    unsigned SimpleSize = Simple.getSizeInBits();
1982    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
1983    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
1984      N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
1985      N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
1986      N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
1987      N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
1988                       DAG.getConstant(SimpleSize, getShiftAmountTy()));
1989      return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
1990    }
1991  }
1992
1993  return SDValue();
1994}
1995
1996/// SimplifyNodeWithTwoResults - Perform optimizations common to nodes that
1997/// compute two values. LoOp and HiOp give the opcodes for the two computations
1998/// that are being performed. Return the simplified value if a simplification
1999/// was made, otherwise return a null SDValue.
2000SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
2001                                                unsigned HiOp) {
2002  // If the high half is not needed, just compute the low half.
2003  bool HiExists = N->hasAnyUseOfValue(1);
2004  if (!HiExists &&
2005      (!LegalOperations ||
2006       TLI.isOperationLegal(LoOp, N->getValueType(0)))) {
2007    SDValue Res = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0),
2008                              N->op_begin(), N->getNumOperands());
2009    return CombineTo(N, Res, Res);
2010  }
2011
2012  // If the low half is not needed, just compute the high half.
2013  bool LoExists = N->hasAnyUseOfValue(0);
2014  if (!LoExists &&
2015      (!LegalOperations ||
2016       TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
2017    SDValue Res = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1),
2018                              N->op_begin(), N->getNumOperands());
2019    return CombineTo(N, Res, Res);
2020  }
2021
2022  // If both halves are used, there is nothing to simplify; leave the node as is.
2023  if (LoExists && HiExists)
2024    return SDValue();
2025
2026  // If the two computed results can be simplified separately, separate them.
2027  if (LoExists) {
2028    SDValue Lo = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0),
2029                             N->op_begin(), N->getNumOperands());
2030    AddToWorkList(Lo.getNode());
2031    SDValue LoOpt = combine(Lo.getNode());
2032    if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
2033        (!LegalOperations ||
2034         TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
2035      return CombineTo(N, LoOpt, LoOpt);
2036  }
2037
2038  if (HiExists) {
2039    SDValue Hi = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1),
2040                             N->op_begin(), N->getNumOperands());
2041    AddToWorkList(Hi.getNode());
2042    SDValue HiOpt = combine(Hi.getNode());
2043    if (HiOpt.getNode() && HiOpt != Hi &&
2044        (!LegalOperations ||
2045         TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
2046      return CombineTo(N, HiOpt, HiOpt);
2047  }
2048
2049  return SDValue();
2050}
2051
2052SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
2053  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS);
2054  if (Res.getNode()) return Res;
2055
2056  EVT VT = N->getValueType(0);
2057  DebugLoc DL = N->getDebugLoc();
2058
2059  // If the type twice as wide is legal, transform the smul_lohi to a wider
2060  // multiply plus a shift.
2061  if (VT.isSimple() && !VT.isVector()) {
2062    MVT Simple = VT.getSimpleVT();
2063    unsigned SimpleSize = Simple.getSizeInBits();
2064    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2065    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2066      SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
2067      SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
2068      Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
2069      // Compute the high part (result #1).
2070      Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
2071                       DAG.getConstant(SimpleSize, getShiftAmountTy()));
2072      Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
2073      // Compute the low part (result #0).
2074      Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
2075      return CombineTo(N, Lo, Hi);
2076    }
2077  }
2078
2079  return SDValue();
2080}
2081
2082SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
2083  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU);
2084  if (Res.getNode()) return Res;
2085
2086  EVT VT = N->getValueType(0);
2087  DebugLoc DL = N->getDebugLoc();
2088
2089  // If the type twice as wide is legal, transform the umul_lohi to a wider
2090  // multiply plus a shift.
2091  if (VT.isSimple() && !VT.isVector()) {
2092    MVT Simple = VT.getSimpleVT();
2093    unsigned SimpleSize = Simple.getSizeInBits();
2094    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2095    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2096      SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
2097      SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
2098      Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
2099      // Compute the high part (result #1).
2100      Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
2101                       DAG.getConstant(SimpleSize, getShiftAmountTy()));
2102      Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
2103      // Compute the low part (result #0).
2104      Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
2105      return CombineTo(N, Lo, Hi);
2106    }
2107  }
2108
2109  return SDValue();
2110}
2111
2112SDValue DAGCombiner::visitSDIVREM(SDNode *N) {
2113  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM);
2114  if (Res.getNode()) return Res;
2115
2116  return SDValue();
2117}
2118
2119SDValue DAGCombiner::visitUDIVREM(SDNode *N) {
2120  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM);
2121  if (Res.getNode()) return Res;
2122
2123  return SDValue();
2124}
2125
2126/// SimplifyBinOpWithSameOpcodeHands - If this is a binary operator with
2127/// two operands of the same opcode, try to simplify it.
2128SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
2129  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2130  EVT VT = N0.getValueType();
2131  assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
2132
2133  // Bail early if none of these transforms apply.
2134  if (N0.getNode()->getNumOperands() == 0) return SDValue();
2135
2136  // For each of OP in AND/OR/XOR:
2137  // fold (OP (zext x), (zext y)) -> (zext (OP x, y))
2138  // fold (OP (sext x), (sext y)) -> (sext (OP x, y))
2139  // fold (OP (aext x), (aext y)) -> (aext (OP x, y))
2140  // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
2141  //
2142  // Do not sink the logical op inside a vector extend, since it may combine
2143  // into a vsetcc.
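  // For example, in the scalar case (and (zext i8 x to i32), (zext i8 y to i32))
  // becomes (zext (and x, y)), performing the logic op in the narrower type.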
2144  EVT Op0VT = N0.getOperand(0).getValueType();
2145  if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
2146       N0.getOpcode() == ISD::SIGN_EXTEND ||
2147       // Avoid infinite looping with PromoteIntBinOp.
2148       (N0.getOpcode() == ISD::ANY_EXTEND &&
2149        (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
2150       (N0.getOpcode() == ISD::TRUNCATE &&
2151        (!TLI.isZExtFree(VT, Op0VT) ||
2152         !TLI.isTruncateFree(Op0VT, VT)) &&
2153        TLI.isTypeLegal(Op0VT))) &&
2154      !VT.isVector() &&
2155      Op0VT == N1.getOperand(0).getValueType() &&
2156      (!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
2157    SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(),
2158                                 N0.getOperand(0).getValueType(),
2159                                 N0.getOperand(0), N1.getOperand(0));
2160    AddToWorkList(ORNode.getNode());
2161    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, ORNode);
2162  }
2163
2164  // For each of OP in SHL/SRL/SRA/AND...
2165  //   fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
2166  //   fold (or  (OP x, z), (OP y, z)) -> (OP (or  x, y), z)
2167  //   fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
2168  if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
2169       N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
2170      N0.getOperand(1) == N1.getOperand(1)) {
2171    SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(),
2172                                 N0.getOperand(0).getValueType(),
2173                                 N0.getOperand(0), N1.getOperand(0));
2174    AddToWorkList(ORNode.getNode());
2175    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
2176                       ORNode, N0.getOperand(1));
2177  }
2178
2179  return SDValue();
2180}
2181
2182SDValue DAGCombiner::visitAND(SDNode *N) {
2183  SDValue N0 = N->getOperand(0);
2184  SDValue N1 = N->getOperand(1);
2185  SDValue LL, LR, RL, RR, CC0, CC1;
2186  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2187  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2188  EVT VT = N1.getValueType();
2189  unsigned BitWidth = VT.getScalarType().getSizeInBits();
2190
2191  // fold vector ops
2192  if (VT.isVector()) {
2193    SDValue FoldedVOp = SimplifyVBinOp(N);
2194    if (FoldedVOp.getNode()) return FoldedVOp;
2195  }
2196
2197  // fold (and x, undef) -> 0
2198  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
2199    return DAG.getConstant(0, VT);
2200  // fold (and c1, c2) -> c1&c2
2201  if (N0C && N1C)
2202    return DAG.FoldConstantArithmetic(ISD::AND, VT, N0C, N1C);
2203  // canonicalize constant to RHS
2204  if (N0C && !N1C)
2205    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N1, N0);
2206  // fold (and x, -1) -> x
2207  if (N1C && N1C->isAllOnesValue())
2208    return N0;
2209  // if (and x, c) is known to be zero, return 0
2210  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
2211                                   APInt::getAllOnesValue(BitWidth)))
2212    return DAG.getConstant(0, VT);
2213  // reassociate and
2214  SDValue RAND = ReassociateOps(ISD::AND, N->getDebugLoc(), N0, N1);
2215  if (RAND.getNode() != 0)
2216    return RAND;
2217  // fold (and (or x, C), D) -> D if (C & D) == D
2218  if (N1C && N0.getOpcode() == ISD::OR)
2219    if (ConstantSDNode *ORI = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
2220      if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue())
2221        return N1;
2222  // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
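  // The any_extend leaves the high bits unspecified.  If, within the original
  // width, the mask only clears bits that are already known zero, replacing
  // the any_extend with a zero_extend yields the same value and makes the AND
  // itself redundant.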
2223  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
2224    SDValue N0Op0 = N0.getOperand(0);
2225    APInt Mask = ~N1C->getAPIntValue();
2226    Mask = Mask.trunc(N0Op0.getValueSizeInBits());
2227    if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
2228      SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(),
2229                                 N0.getValueType(), N0Op0);
2230
2231      // Replace uses of the AND with uses of the Zero extend node.
2232      CombineTo(N, Zext);
2233
2234      // We actually want to replace all uses of the any_extend with the
2235      // zero_extend, to avoid duplicating things.  This will later cause this
2236      // AND to be folded.
2237      CombineTo(N0.getNode(), Zext);
2238      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
2239    }
2240  }
2241  // fold (and (setcc x), (setcc y)) -> (setcc (and x, y))
2242  if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
2243    ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
2244    ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
2245
2246    if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
2247        LL.getValueType().isInteger()) {
2248      // fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0)
2249      if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) {
2250        SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(),
2251                                     LR.getValueType(), LL, RL);
2252        AddToWorkList(ORNode.getNode());
2253        return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
2254      }
2255      // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1)
2256      if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) {
2257        SDValue ANDNode = DAG.getNode(ISD::AND, N0.getDebugLoc(),
2258                                      LR.getValueType(), LL, RL);
2259        AddToWorkList(ANDNode.getNode());
2260        return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1);
2261      }
2262      // fold (and (setgt X,  -1), (setgt Y,  -1)) -> (setgt (or X, Y), -1)
2263      if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) {
2264        SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(),
2265                                     LR.getValueType(), LL, RL);
2266        AddToWorkList(ORNode.getNode());
2267        return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
2268      }
2269    }
2270    // canonicalize equivalent to ll == rl
2271    if (LL == RR && LR == RL) {
2272      Op1 = ISD::getSetCCSwappedOperands(Op1);
2273      std::swap(RL, RR);
2274    }
2275    if (LL == RL && LR == RR) {
2276      bool isInteger = LL.getValueType().isInteger();
2277      ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger);
2278      if (Result != ISD::SETCC_INVALID &&
2279          (!LegalOperations || TLI.isCondCodeLegal(Result, LL.getValueType())))
2280        return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(),
2281                            LL, LR, Result);
2282    }
2283  }
2284
2285  // Simplify: (and (op x...), (op y...))  -> (op (and x, y))
2286  if (N0.getOpcode() == N1.getOpcode()) {
2287    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
2288    if (Tmp.getNode()) return Tmp;
2289  }
2290
2291  // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
2292  // fold (and (sra)) -> (and (srl)) when possible.
2293  if (!VT.isVector() &&
2294      SimplifyDemandedBits(SDValue(N, 0)))
2295    return SDValue(N, 0);
2296
2297  // fold (zext_inreg (extload x)) -> (zextload x)
2298  if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) {
2299    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
2300    EVT MemVT = LN0->getMemoryVT();
2301    // If we zero all the possible extended bits, then we can turn this into
2302    // a zextload if we are running before legalize or the operation is legal.
2303    unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits();
2304    if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
2305                           BitWidth - MemVT.getScalarType().getSizeInBits())) &&
2306        ((!LegalOperations && !LN0->isVolatile()) ||
2307         TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
2308      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
2309                                       LN0->getChain(), LN0->getBasePtr(),
2310                                       LN0->getPointerInfo(), MemVT,
2311                                       LN0->isVolatile(), LN0->isNonTemporal(),
2312                                       LN0->getAlignment());
2313      AddToWorkList(N);
2314      CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
2315      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
2316    }
2317  }
2318  // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
2319  if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
2320      N0.hasOneUse()) {
2321    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
2322    EVT MemVT = LN0->getMemoryVT();
2323    // If we zero all the possible extended bits, then we can turn this into
2324    // a zextload if we are running before legalize or the operation is legal.
2325    unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits();
2326    if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
2327                           BitWidth - MemVT.getScalarType().getSizeInBits())) &&
2328        ((!LegalOperations && !LN0->isVolatile()) ||
2329         TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
2330      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
2331                                       LN0->getChain(),
2332                                       LN0->getBasePtr(), LN0->getPointerInfo(),
2333                                       MemVT,
2334                                       LN0->isVolatile(), LN0->isNonTemporal(),
2335                                       LN0->getAlignment());
2336      AddToWorkList(N);
2337      CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
2338      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
2339    }
2340  }
2341
2342  // fold (and (load x), 255) -> (zextload x, i8)
2343  // fold (and (extload x, i16), 255) -> (zextload x, i8)
2344  // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8)
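  // A mask like 255 keeps only the low 8 bits, which is exactly what an i8
  // zero-extending load produces, so the wide load plus AND can become a
  // single narrower zextload (subject to the legality checks below).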
2345  if (N1C && (N0.getOpcode() == ISD::LOAD ||
2346              (N0.getOpcode() == ISD::ANY_EXTEND &&
2347               N0.getOperand(0).getOpcode() == ISD::LOAD))) {
2348    bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND;
2349    LoadSDNode *LN0 = HasAnyExt
2350      ? cast<LoadSDNode>(N0.getOperand(0))
2351      : cast<LoadSDNode>(N0);
2352    if (LN0->getExtensionType() != ISD::SEXTLOAD &&
2353        LN0->isUnindexed() && N0.hasOneUse() && LN0->hasOneUse()) {
2354      uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits();
2355      if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue())){
2356        EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
2357        EVT LoadedVT = LN0->getMemoryVT();
2358
2359        if (ExtVT == LoadedVT &&
2360            (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
2361          EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
2362
2363          SDValue NewLoad =
2364            DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
2365                           LN0->getChain(), LN0->getBasePtr(),
2366                           LN0->getPointerInfo(),
2367                           ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
2368                           LN0->getAlignment());
2369          AddToWorkList(N);
2370          CombineTo(LN0, NewLoad, NewLoad.getValue(1));
2371          return SDValue(N, 0);   // Return N so it doesn't get rechecked!
2372        }
2373
2374        // Do not change the width of a volatile load.
2375        // Do not generate loads of non-round integer types since these can
2376        // be expensive (and would be wrong if the type is not byte sized).
2377        if (!LN0->isVolatile() && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() &&
2378            (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
2379          EVT PtrType = LN0->getOperand(1).getValueType();
2380
2381          unsigned Alignment = LN0->getAlignment();
2382          SDValue NewPtr = LN0->getBasePtr();
2383
2384          // For big endian targets, we need to add an offset to the pointer
2385          // to load the correct bytes.  For little endian systems, we merely
2386          // need to read fewer bytes from the same pointer.
2387          if (TLI.isBigEndian()) {
2388            unsigned LVTStoreBytes = LoadedVT.getStoreSize();
2389            unsigned EVTStoreBytes = ExtVT.getStoreSize();
2390            unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;
2391            NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(), PtrType,
2392                                 NewPtr, DAG.getConstant(PtrOff, PtrType));
2393            Alignment = MinAlign(Alignment, PtrOff);
2394          }
2395
2396          AddToWorkList(NewPtr.getNode());
2397
2398          EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
2399          SDValue Load =
2400            DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
2401                           LN0->getChain(), NewPtr,
2402                           LN0->getPointerInfo(),
2403                           ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
2404                           Alignment);
2405          AddToWorkList(N);
2406          CombineTo(LN0, Load, Load.getValue(1));
2407          return SDValue(N, 0);   // Return N so it doesn't get rechecked!
2408        }
2409      }
2410    }
2411  }
2412
2413  return SDValue();
2414}
2415
2416SDValue DAGCombiner::visitOR(SDNode *N) {
2417  SDValue N0 = N->getOperand(0);
2418  SDValue N1 = N->getOperand(1);
2419  SDValue LL, LR, RL, RR, CC0, CC1;
2420  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2421  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2422  EVT VT = N1.getValueType();
2423
2424  // fold vector ops
2425  if (VT.isVector()) {
2426    SDValue FoldedVOp = SimplifyVBinOp(N);
2427    if (FoldedVOp.getNode()) return FoldedVOp;
2428  }
2429
2430  // fold (or x, undef) -> -1
2431  if (!LegalOperations &&
2432      (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) {
2433    EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
2434    return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
2435  }
2436  // fold (or c1, c2) -> c1|c2
2437  if (N0C && N1C)
2438    return DAG.FoldConstantArithmetic(ISD::OR, VT, N0C, N1C);
2439  // canonicalize constant to RHS
2440  if (N0C && !N1C)
2441    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N1, N0);
2442  // fold (or x, 0) -> x
2443  if (N1C && N1C->isNullValue())
2444    return N0;
2445  // fold (or x, -1) -> -1
2446  if (N1C && N1C->isAllOnesValue())
2447    return N1;
2448  // fold (or x, c) -> c iff (x & ~c) == 0
2449  if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
2450    return N1;
2451  // reassociate or
2452  SDValue ROR = ReassociateOps(ISD::OR, N->getDebugLoc(), N0, N1);
2453  if (ROR.getNode() != 0)
2454    return ROR;
2455  // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
2456  // iff (c1 & c2) != 0.
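  // This holds because (X & c1) | c2 == (X | c2) & (c1 | c2): OR-ing with c2
  // forces the c2 bits on, and the AND mask c1|c2 preserves them.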
2457  if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
2458             isa<ConstantSDNode>(N0.getOperand(1))) {
2459    ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
2460    if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0)
2461      return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
2462                         DAG.getNode(ISD::OR, N0.getDebugLoc(), VT,
2463                                     N0.getOperand(0), N1),
2464                         DAG.FoldConstantArithmetic(ISD::OR, VT, N1C, C1));
2465  }
2466  // fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
2467  if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
2468    ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
2469    ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
2470
2471    if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
2472        LL.getValueType().isInteger()) {
2473      // fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
2474      // fold (or (setlt X, 0), (setlt Y, 0)) -> (setne (or X, Y), 0)
2475      if (cast<ConstantSDNode>(LR)->isNullValue() &&
2476          (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
2477        SDValue ORNode = DAG.getNode(ISD::OR, LR.getDebugLoc(),
2478                                     LR.getValueType(), LL, RL);
2479        AddToWorkList(ORNode.getNode());
2480        return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
2481      }
2482      // fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
2483      // fold (or (setgt X, -1), (setgt Y, -1)) -> (setgt (and X, Y), -1)
2484      if (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
2485          (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
2486        SDValue ANDNode = DAG.getNode(ISD::AND, LR.getDebugLoc(),
2487                                      LR.getValueType(), LL, RL);
2488        AddToWorkList(ANDNode.getNode());
2489        return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1);
2490      }
2491    }
2492    // canonicalize equivalent to ll == rl
2493    if (LL == RR && LR == RL) {
2494      Op1 = ISD::getSetCCSwappedOperands(Op1);
2495      std::swap(RL, RR);
2496    }
2497    if (LL == RL && LR == RR) {
2498      bool isInteger = LL.getValueType().isInteger();
2499      ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
2500      if (Result != ISD::SETCC_INVALID &&
2501          (!LegalOperations || TLI.isCondCodeLegal(Result, LL.getValueType())))
2502        return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(),
2503                            LL, LR, Result);
2504    }
2505  }
2506
2507  // Simplify: (or (op x...), (op y...))  -> (op (or x, y))
2508  if (N0.getOpcode() == N1.getOpcode()) {
2509    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
2510    if (Tmp.getNode()) return Tmp;
2511  }
2512
2513  // (or (and X, C1), (and Y, C2))  -> (and (or X, Y), C3) if possible.
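  // For example, with C1 = 0xFF00 and C2 = 0x00FF, (X & 0xFF00) | (Y & 0x00FF)
  // becomes (X | Y) & 0xFFFF when the low byte of X and bits 8-15 of Y are
  // known to be zero.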
2514  if (N0.getOpcode() == ISD::AND &&
2515      N1.getOpcode() == ISD::AND &&
2516      N0.getOperand(1).getOpcode() == ISD::Constant &&
2517      N1.getOperand(1).getOpcode() == ISD::Constant &&
2518      // Don't increase # computations.
2519      (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
2520    // We can only do this xform if we know that bits from X that are set in C2
2521    // but not in C1 are already zero.  Likewise for Y.
2522    const APInt &LHSMask =
2523      cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
2524    const APInt &RHSMask =
2525      cast<ConstantSDNode>(N1.getOperand(1))->getAPIntValue();
2526
2527    if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
2528        DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
2529      SDValue X = DAG.getNode(ISD::OR, N0.getDebugLoc(), VT,
2530                              N0.getOperand(0), N1.getOperand(0));
2531      return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, X,
2532                         DAG.getConstant(LHSMask | RHSMask, VT));
2533    }
2534  }
2535
2536  // See if this is some rotate idiom.
2537  if (SDNode *Rot = MatchRotate(N0, N1, N->getDebugLoc()))
2538    return SDValue(Rot, 0);
2539
2540  // Simplify the operands using demanded-bits information.
2541  if (!VT.isVector() &&
2542      SimplifyDemandedBits(SDValue(N, 0)))
2543    return SDValue(N, 0);
2544
2545  return SDValue();
2546}
2547
2548/// MatchRotateHalf - Match "(X shl/srl V1) & V2" where V2 may not be present.
2549static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
2550  if (Op.getOpcode() == ISD::AND) {
2551    if (isa<ConstantSDNode>(Op.getOperand(1))) {
2552      Mask = Op.getOperand(1);
2553      Op = Op.getOperand(0);
2554    } else {
2555      return false;
2556    }
2557  }
2558
2559  if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
2560    Shift = Op;
2561    return true;
2562  }
2563
2564  return false;
2565}
2566
2567// MatchRotate - Handle an 'or' of two operands.  If this is one of the many
2568// idioms for rotate, and if the target supports rotation instructions, generate
2569// a rot[lr].
2570SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) {
2571  // Must be a legal type.  Expanded and promoted types won't work with rotates.
2572  EVT VT = LHS.getValueType();
2573  if (!TLI.isTypeLegal(VT)) return 0;
2574
2575  // The target must have at least one rotate flavor.
2576  bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT);
2577  bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT);
2578  if (!HasROTL && !HasROTR) return 0;
2579
2580  // Match "(X shl/srl V1) & V2" where V2 may not be present.
2581  SDValue LHSShift;   // The shift.
2582  SDValue LHSMask;    // AND value if any.
2583  if (!MatchRotateHalf(LHS, LHSShift, LHSMask))
2584    return 0; // Not part of a rotate.
2585
2586  SDValue RHSShift;   // The shift.
2587  SDValue RHSMask;    // AND value if any.
2588  if (!MatchRotateHalf(RHS, RHSShift, RHSMask))
2589    return 0; // Not part of a rotate.
2590
2591  if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
2592    return 0;   // Not shifting the same value.
2593
2594  if (LHSShift.getOpcode() == RHSShift.getOpcode())
2595    return 0;   // Shifts must disagree.
2596
2597  // Canonicalize shl to left side in a shl/srl pair.
2598  if (RHSShift.getOpcode() == ISD::SHL) {
2599    std::swap(LHS, RHS);
2600    std::swap(LHSShift, RHSShift);
2601    std::swap(LHSMask , RHSMask );
2602  }
2603
2604  unsigned OpSizeInBits = VT.getSizeInBits();
2605  SDValue LHSShiftArg = LHSShift.getOperand(0);
2606  SDValue LHSShiftAmt = LHSShift.getOperand(1);
2607  SDValue RHSShiftAmt = RHSShift.getOperand(1);
2608
2609  // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
2610  // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
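  // For example, on i32 (or (shl x, 8), (srl x, 24)) is a rotate left by 8
  // (equivalently a rotate right by 24); the two shift amounts must sum to
  // the operand's bit width.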
2611  if (LHSShiftAmt.getOpcode() == ISD::Constant &&
2612      RHSShiftAmt.getOpcode() == ISD::Constant) {
2613    uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getZExtValue();
2614    uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getZExtValue();
2615    if ((LShVal + RShVal) != OpSizeInBits)
2616      return 0;
2617
2618    SDValue Rot;
2619    if (HasROTL)
2620      Rot = DAG.getNode(ISD::ROTL, DL, VT, LHSShiftArg, LHSShiftAmt);
2621    else
2622      Rot = DAG.getNode(ISD::ROTR, DL, VT, LHSShiftArg, RHSShiftAmt);
2623
2624    // If there is an AND of either shifted operand, apply it to the result.
2625    if (LHSMask.getNode() || RHSMask.getNode()) {
2626      APInt Mask = APInt::getAllOnesValue(OpSizeInBits);
2627
2628      if (LHSMask.getNode()) {
2629        APInt RHSBits = APInt::getLowBitsSet(OpSizeInBits, LShVal);
2630        Mask &= cast<ConstantSDNode>(LHSMask)->getAPIntValue() | RHSBits;
2631      }
2632      if (RHSMask.getNode()) {
2633        APInt LHSBits = APInt::getHighBitsSet(OpSizeInBits, RShVal);
2634        Mask &= cast<ConstantSDNode>(RHSMask)->getAPIntValue() | LHSBits;
2635      }
2636
2637      Rot = DAG.getNode(ISD::AND, DL, VT, Rot, DAG.getConstant(Mask, VT));
2638    }
2639
2640    return Rot.getNode();
2641  }
2642
2643  // If there is a mask here, and we have a variable shift, we can't be sure
2644  // that we're masking out the right stuff.
2645  if (LHSMask.getNode() || RHSMask.getNode())
2646    return 0;
2647
2648  // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotl x, y)
2649  // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotr x, (sub 32, y))
2650  if (RHSShiftAmt.getOpcode() == ISD::SUB &&
2651      LHSShiftAmt == RHSShiftAmt.getOperand(1)) {
2652    if (ConstantSDNode *SUBC =
2653          dyn_cast<ConstantSDNode>(RHSShiftAmt.getOperand(0))) {
2654      if (SUBC->getAPIntValue() == OpSizeInBits) {
2655        if (HasROTL)
2656          return DAG.getNode(ISD::ROTL, DL, VT,
2657                             LHSShiftArg, LHSShiftAmt).getNode();
2658        else
2659          return DAG.getNode(ISD::ROTR, DL, VT,
2660                             LHSShiftArg, RHSShiftAmt).getNode();
2661      }
2662    }
2663  }
2664
2665  // fold (or (shl x, (sub 32, y)), (srl x, y)) -> (rotr x, y)
2666  // fold (or (shl x, (sub 32, y)), (srl x, y)) -> (rotl x, (sub 32, y))
2667  if (LHSShiftAmt.getOpcode() == ISD::SUB &&
2668      RHSShiftAmt == LHSShiftAmt.getOperand(1)) {
2669    if (ConstantSDNode *SUBC =
2670          dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0))) {
2671      if (SUBC->getAPIntValue() == OpSizeInBits) {
2672        if (HasROTR)
2673          return DAG.getNode(ISD::ROTR, DL, VT,
2674                             LHSShiftArg, RHSShiftAmt).getNode();
2675        else
2676          return DAG.getNode(ISD::ROTL, DL, VT,
2677                             LHSShiftArg, LHSShiftAmt).getNode();
2678      }
2679    }
2680  }
2681
2682  // Look for sign/zext/any-extended or truncate cases:
2683  if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
2684       || LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
2685       || LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND
2686       || LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
2687      (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
2688       || RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
2689       || RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND
2690       || RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
2691    SDValue LExtOp0 = LHSShiftAmt.getOperand(0);
2692    SDValue RExtOp0 = RHSShiftAmt.getOperand(0);
2693    if (RExtOp0.getOpcode() == ISD::SUB &&
2694        RExtOp0.getOperand(1) == LExtOp0) {
2695      // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
2696      //   (rotl x, y)
2697      // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
2698      //   (rotr x, (sub 32, y))
2699      if (ConstantSDNode *SUBC =
2700            dyn_cast<ConstantSDNode>(RExtOp0.getOperand(0))) {
2701        if (SUBC->getAPIntValue() == OpSizeInBits) {
2702          return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
2703                             LHSShiftArg,
2704                             HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode();
2705        }
2706      }
2707    } else if (LExtOp0.getOpcode() == ISD::SUB &&
2708               RExtOp0 == LExtOp0.getOperand(1)) {
2709      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
2710      //   (rotr x, y)
2711      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
2712      //   (rotl x, (sub 32, y))
2713      if (ConstantSDNode *SUBC =
2714            dyn_cast<ConstantSDNode>(LExtOp0.getOperand(0))) {
2715        if (SUBC->getAPIntValue() == OpSizeInBits) {
2716          return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT,
2717                             LHSShiftArg,
2718                             HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode();
2719        }
2720      }
2721    }
2722  }
2723
2724  return 0;
2725}
2726
2727SDValue DAGCombiner::visitXOR(SDNode *N) {
2728  SDValue N0 = N->getOperand(0);
2729  SDValue N1 = N->getOperand(1);
2730  SDValue LHS, RHS, CC;
2731  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2732  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2733  EVT VT = N0.getValueType();
2734
2735  // fold vector ops
2736  if (VT.isVector()) {
2737    SDValue FoldedVOp = SimplifyVBinOp(N);
2738    if (FoldedVOp.getNode()) return FoldedVOp;
2739  }
2740
2741  // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
2742  if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
2743    return DAG.getConstant(0, VT);
2744  // fold (xor x, undef) -> undef
2745  if (N0.getOpcode() == ISD::UNDEF)
2746    return N0;
2747  if (N1.getOpcode() == ISD::UNDEF)
2748    return N1;
2749  // fold (xor c1, c2) -> c1^c2
2750  if (N0C && N1C)
2751    return DAG.FoldConstantArithmetic(ISD::XOR, VT, N0C, N1C);
2752  // canonicalize constant to RHS
2753  if (N0C && !N1C)
2754    return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0);
2755  // fold (xor x, 0) -> x
2756  if (N1C && N1C->isNullValue())
2757    return N0;
2758  // reassociate xor
2759  SDValue RXOR = ReassociateOps(ISD::XOR, N->getDebugLoc(), N0, N1);
2760  if (RXOR.getNode() != 0)
2761    return RXOR;
2762
2763  // fold !(x cc y) -> (x !cc y)
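  // e.g. (xor (setcc x, y, seteq), 1) -> (setcc x, y, setne), provided the
  // inverted condition code is legal (or we are before operation legalization).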
2764  if (N1C && N1C->getAPIntValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) {
2765    bool isInt = LHS.getValueType().isInteger();
2766    ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
2767                                               isInt);
2768
2769    if (!LegalOperations || TLI.isCondCodeLegal(NotCC, LHS.getValueType())) {
2770      switch (N0.getOpcode()) {
2771      default:
2772        llvm_unreachable("Unhandled SetCC Equivalent!");
2773      case ISD::SETCC:
2774        return DAG.getSetCC(N->getDebugLoc(), VT, LHS, RHS, NotCC);
2775      case ISD::SELECT_CC:
2776        return DAG.getSelectCC(N->getDebugLoc(), LHS, RHS, N0.getOperand(2),
2777                               N0.getOperand(3), NotCC);
2778      }
2779    }
2780  }
2781
2782  // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
2783  if (N1C && N1C->getAPIntValue() == 1 && N0.getOpcode() == ISD::ZERO_EXTEND &&
2784      N0.getNode()->hasOneUse() &&
2785      isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
2786    SDValue V = N0.getOperand(0);
2787    V = DAG.getNode(ISD::XOR, N0.getDebugLoc(), V.getValueType(), V,
2788                    DAG.getConstant(1, V.getValueType()));
2789    AddToWorkList(V.getNode());
2790    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, V);
2791  }
2792
2793  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
2794  if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 &&
2795      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
2796    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
2797    if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
2798      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
2799      LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS
2800      RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS
2801      AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
2802      return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS);
2803    }
2804  }
2805  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
2806  if (N1C && N1C->isAllOnesValue() &&
2807      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
2808    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
2809    if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
2810      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
2811      LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS
2812      RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS
2813      AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
2814      return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS);
2815    }
2816  }
2817  // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
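  // e.g. (xor (xor x, 0xF0), 0xFF) -> (xor x, 0x0F), folding the two
  // constants into one.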
2818  if (N1C && N0.getOpcode() == ISD::XOR) {
2819    ConstantSDNode *N00C = dyn_cast<ConstantSDNode>(N0.getOperand(0));
2820    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2821    if (N00C)
2822      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(1),
2823                         DAG.getConstant(N1C->getAPIntValue() ^
2824                                         N00C->getAPIntValue(), VT));
2825    if (N01C)
2826      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(0),
2827                         DAG.getConstant(N1C->getAPIntValue() ^
2828                                         N01C->getAPIntValue(), VT));
2829  }
2830  // fold (xor x, x) -> 0
2831  if (N0 == N1) {
2832    if (!VT.isVector()) {
2833      return DAG.getConstant(0, VT);
2834    } else if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)){
2835      // Produce a vector of zeros.
2836      SDValue El = DAG.getConstant(0, VT.getVectorElementType());
2837      std::vector<SDValue> Ops(VT.getVectorNumElements(), El);
2838      return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
2839                         &Ops[0], Ops.size());
2840    }
2841  }
2842
2843  // Simplify: xor (op x...), (op y...)  -> (op (xor x, y))
2844  if (N0.getOpcode() == N1.getOpcode()) {
2845    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
2846    if (Tmp.getNode()) return Tmp;
2847  }
2848
2849  // Simplify the expression using non-local knowledge.
2850  if (!VT.isVector() &&
2851      SimplifyDemandedBits(SDValue(N, 0)))
2852    return SDValue(N, 0);
2853
2854  return SDValue();
2855}
2856
2857/// visitShiftByConstant - Handle transforms common to the three shifts, when
2858/// the shift amount is a constant.
2859SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
2860  SDNode *LHS = N->getOperand(0).getNode();
2861  if (!LHS->hasOneUse()) return SDValue();
2862
2863  // We want to pull some binops through shifts, so that we have (and (shift))
2864  // instead of (shift (and)), likewise for add, or, xor, etc.  This sort of
2865  // thing happens with address calculations, so it's important to canonicalize
2866  // it.
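  // For example (a sketch, assuming i32 and the restrictions checked below):
  //   (shl (add (srl a, 3), 7), 2) -> (add (shl (srl a, 3), 2), 28)
  // exposes the scaled base/offset form that address matching prefers.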
2867  bool HighBitSet = false;  // Can we transform this if the high bit is set?
2868
2869  switch (LHS->getOpcode()) {
2870  default: return SDValue();
2871  case ISD::OR:
2872  case ISD::XOR:
2873    HighBitSet = false; // We can only transform sra if the high bit is clear.
2874    break;
2875  case ISD::AND:
2876    HighBitSet = true;  // We can only transform sra if the high bit is set.
2877    break;
2878  case ISD::ADD:
2879    if (N->getOpcode() != ISD::SHL)
2880      return SDValue(); // only shl(add) not sr[al](add).
2881    HighBitSet = false; // We can only transform sra if the high bit is clear.
2882    break;
2883  }
2884
2885  // We require the RHS of the binop to be a constant as well.
2886  ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
2887  if (!BinOpCst) return SDValue();
2888
2889  // FIXME: disable this unless the input to the binop is a shift by a constant.
2890  // If it is not a shift, it pessimizes some common cases like:
2891  //
2892  //    void foo(int *X, int i) { X[i & 1235] = 1; }
2893  //    int bar(int *X, int i) { return X[i & 255]; }
2894  SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
2895  if ((BinOpLHSVal->getOpcode() != ISD::SHL &&
2896       BinOpLHSVal->getOpcode() != ISD::SRA &&
2897       BinOpLHSVal->getOpcode() != ISD::SRL) ||
2898      !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
2899    return SDValue();
2900
2901  EVT VT = N->getValueType(0);
2902
2903  // If this is a signed shift right, and the high bit is modified by the
2904  // logical operation, do not perform the transformation. The HighBitSet
2905  // boolean indicates the value of the high bit of the constant which would
2906  // cause it to be modified for this operation.
2907  if (N->getOpcode() == ISD::SRA) {
2908    bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
2909    if (BinOpRHSSignSet != HighBitSet)
2910      return SDValue();
2911  }
2912
2913  // Fold the constants, shifting the binop RHS by the shift amount.
2914  SDValue NewRHS = DAG.getNode(N->getOpcode(), LHS->getOperand(1).getDebugLoc(),
2915                               N->getValueType(0),
2916                               LHS->getOperand(1), N->getOperand(1));
2917
2918  // Create the new shift.
2919  SDValue NewShift = DAG.getNode(N->getOpcode(),
2920                                 LHS->getOperand(0).getDebugLoc(),
2921                                 VT, LHS->getOperand(0), N->getOperand(1));
2922
2923  // Create the new binop.
2924  return DAG.getNode(LHS->getOpcode(), N->getDebugLoc(), VT, NewShift, NewRHS);
2925}
2926
2927SDValue DAGCombiner::visitSHL(SDNode *N) {
2928  SDValue N0 = N->getOperand(0);
2929  SDValue N1 = N->getOperand(1);
2930  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2931  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2932  EVT VT = N0.getValueType();
2933  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
2934
2935  // fold (shl c1, c2) -> c1<<c2
2936  if (N0C && N1C)
2937    return DAG.FoldConstantArithmetic(ISD::SHL, VT, N0C, N1C);
2938  // fold (shl 0, x) -> 0
2939  if (N0C && N0C->isNullValue())
2940    return N0;
2941  // fold (shl x, c >= size(x)) -> undef
2942  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
2943    return DAG.getUNDEF(VT);
2944  // fold (shl x, 0) -> x
2945  if (N1C && N1C->isNullValue())
2946    return N0;
2947  // if (shl x, c) is known to be zero, return 0
2948  if (DAG.MaskedValueIsZero(SDValue(N, 0),
2949                            APInt::getAllOnesValue(OpSizeInBits)))
2950    return DAG.getConstant(0, VT);
2951  // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
2952  if (N1.getOpcode() == ISD::TRUNCATE &&
2953      N1.getOperand(0).getOpcode() == ISD::AND &&
2954      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
2955    SDValue N101 = N1.getOperand(0).getOperand(1);
2956    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
2957      EVT TruncVT = N1.getValueType();
2958      SDValue N100 = N1.getOperand(0).getOperand(0);
2959      APInt TruncC = N101C->getAPIntValue();
2960      TruncC = TruncC.trunc(TruncVT.getSizeInBits());
2961      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
2962                         DAG.getNode(ISD::AND, N->getDebugLoc(), TruncVT,
2963                                     DAG.getNode(ISD::TRUNCATE,
2964                                                 N->getDebugLoc(),
2965                                                 TruncVT, N100),
2966                                     DAG.getConstant(TruncC, TruncVT)));
2967    }
2968  }
2969
2970  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
2971    return SDValue(N, 0);
2972
2973  // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
2974  if (N1C && N0.getOpcode() == ISD::SHL &&
2975      N0.getOperand(1).getOpcode() == ISD::Constant) {
2976    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
2977    uint64_t c2 = N1C->getZExtValue();
2978    if (c1 + c2 >= OpSizeInBits)
2979      return DAG.getConstant(0, VT);
2980    return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0.getOperand(0),
2981                       DAG.getConstant(c1 + c2, N1.getValueType()));
2982  }
2983
2984  // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
2985  // For this to be valid, the second form must not preserve any of the bits
2986  // that are shifted out by the inner shift in the first form.  This means
2987  // the outer shift size must be >= the number of bits added by the ext.
2988  // As a corollary, we don't care what kind of ext it is.
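  // For example, with x:i16 zero-extended to i32 (illustrative):
  //   (shl (zext (shl x, 4)), 20) -> (shl (zext x), 24)
  // is safe because the outer shift (20) is at least the 16 bits added by the
  // extension, so anything the inner shift would have discarded is also
  // shifted out of the wider result.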
2989  if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND ||
2990              N0.getOpcode() == ISD::ANY_EXTEND ||
2991              N0.getOpcode() == ISD::SIGN_EXTEND) &&
2992      N0.getOperand(0).getOpcode() == ISD::SHL &&
2993      isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
2994    uint64_t c1 =
2995      cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
2996    uint64_t c2 = N1C->getZExtValue();
2997    EVT InnerShiftVT = N0.getOperand(0).getValueType();
2998    uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
2999    if (c2 >= OpSizeInBits - InnerShiftSize) {
3000      if (c1 + c2 >= OpSizeInBits)
3001        return DAG.getConstant(0, VT);
3002      return DAG.getNode(ISD::SHL, N0->getDebugLoc(), VT,
3003                         DAG.getNode(N0.getOpcode(), N0->getDebugLoc(), VT,
3004                                     N0.getOperand(0)->getOperand(0)),
3005                         DAG.getConstant(c1 + c2, N1.getValueType()));
3006    }
3007  }
3008
3009  // fold (shl (srl x, c1), c2) -> (shl (and x, (shl -1, c1)), (sub c2, c1)) or
3010  //                               (srl (and x, (shl -1, c1)), (sub c1, c2))
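  // e.g. for i32: (shl (srl x, 4), 6) -> (shl (and x, 0xFFFFFFF0), 2), and
  //               (shl (srl x, 6), 4) -> (srl (and x, 0xFFFFFFC0), 2).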
3011  if (N1C && N0.getOpcode() == ISD::SRL &&
3012      N0.getOperand(1).getOpcode() == ISD::Constant) {
3013    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
3014    if (c1 < VT.getSizeInBits()) {
3015      uint64_t c2 = N1C->getZExtValue();
3016      SDValue HiBitsMask =
3017        DAG.getConstant(APInt::getHighBitsSet(VT.getSizeInBits(),
3018                                              VT.getSizeInBits() - c1),
3019                        VT);
3020      SDValue Mask = DAG.getNode(ISD::AND, N0.getDebugLoc(), VT,
3021                                 N0.getOperand(0),
3022                                 HiBitsMask);
3023      if (c2 > c1)
3024        return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, Mask,
3025                           DAG.getConstant(c2-c1, N1.getValueType()));
3026      else
3027        return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, Mask,
3028                           DAG.getConstant(c1-c2, N1.getValueType()));
3029    }
3030  }
3031  // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
3032  if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
3033    SDValue HiBitsMask =
3034      DAG.getConstant(APInt::getHighBitsSet(VT.getSizeInBits(),
3035                                            VT.getSizeInBits() -
3036                                              N1C->getZExtValue()),
3037                      VT);
3038    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
3039                       HiBitsMask);
3040  }
3041
3042  if (N1C) {
3043    SDValue NewSHL = visitShiftByConstant(N, N1C->getZExtValue());
3044    if (NewSHL.getNode())
3045      return NewSHL;
3046  }
3047
3048  return SDValue();
3049}
3050
3051SDValue DAGCombiner::visitSRA(SDNode *N) {
3052  SDValue N0 = N->getOperand(0);
3053  SDValue N1 = N->getOperand(1);
3054  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
3055  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3056  EVT VT = N0.getValueType();
3057  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
3058
3059  // fold (sra c1, c2) -> c1 >>s c2
3060  if (N0C && N1C)
3061    return DAG.FoldConstantArithmetic(ISD::SRA, VT, N0C, N1C);
3062  // fold (sra 0, x) -> 0
3063  if (N0C && N0C->isNullValue())
3064    return N0;
3065  // fold (sra -1, x) -> -1
3066  if (N0C && N0C->isAllOnesValue())
3067    return N0;
3068  // fold (sra x, c >= size(x)) -> undef
3069  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
3070    return DAG.getUNDEF(VT);
3071  // fold (sra x, 0) -> x
3072  if (N1C && N1C->isNullValue())
3073    return N0;
3074  // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
3075  // sext_inreg.
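  // e.g. for i32: (sra (shl x, 24), 24) -> (sext_inreg x, i8).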
3076  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
3077    unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
3078    EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
3079    if (VT.isVector())
3080      ExtVT = EVT::getVectorVT(*DAG.getContext(),
3081                               ExtVT, VT.getVectorNumElements());
3082    if ((!LegalOperations ||
3083         TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
3084      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
3085                         N0.getOperand(0), DAG.getValueType(ExtVT));
3086  }
3087
3088  // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
3089  if (N1C && N0.getOpcode() == ISD::SRA) {
3090    if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3091      unsigned Sum = N1C->getZExtValue() + C1->getZExtValue();
3092      if (Sum >= OpSizeInBits) Sum = OpSizeInBits-1;
3093      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0.getOperand(0),
3094                         DAG.getConstant(Sum, N1C->getValueType(0)));
3095    }
3096  }
3097
3098  // fold (sra (shl X, m), (sub result_size, n))
3099  // -> (sign_extend (trunc (srl X, (sub (sub result_size, n), m)))) for
3100  // result_size - n != m.
3101  // If truncate is free for the target, this is likely to result in better
3102  // code.
3103  if (N0.getOpcode() == ISD::SHL) {
3104    // Get the two constants of the shifts, CN0 = m, CN = n.
3105    const ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3106    if (N01C && N1C) {
3107      // Determine what the truncate's result bitsize and type would be.
3108      EVT TruncVT =
3109        EVT::getIntegerVT(*DAG.getContext(),
3110                          OpSizeInBits - N1C->getZExtValue());
3111      // Determine the residual right-shift amount.
3112      signed ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
3113
3114      // If the shift is not a no-op (in which case this should be just a sign
3115      // extend already), the truncated-to type is legal, sign_extend is legal
3116      // on that type, and the truncate to that type is both legal and free,
3117      // perform the transform.
3118      if ((ShiftAmt > 0) &&
3119          TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
3120          TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
3121          TLI.isTruncateFree(VT, TruncVT)) {
3122
3123          SDValue Amt = DAG.getConstant(ShiftAmt, getShiftAmountTy());
3124          SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT,
3125                                      N0.getOperand(0), Amt);
3126          SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), TruncVT,
3127                                      Shift);
3128          return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(),
3129                             N->getValueType(0), Trunc);
3130      }
3131    }
3132  }
3133
3134  // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
3135  if (N1.getOpcode() == ISD::TRUNCATE &&
3136      N1.getOperand(0).getOpcode() == ISD::AND &&
3137      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
3138    SDValue N101 = N1.getOperand(0).getOperand(1);
3139    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
3140      EVT TruncVT = N1.getValueType();
3141      SDValue N100 = N1.getOperand(0).getOperand(0);
3142      APInt TruncC = N101C->getAPIntValue();
3143      TruncC = TruncC.trunc(TruncVT.getScalarType().getSizeInBits());
3144      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
3145                         DAG.getNode(ISD::AND, N->getDebugLoc(),
3146                                     TruncVT,
3147                                     DAG.getNode(ISD::TRUNCATE,
3148                                                 N->getDebugLoc(),
3149                                                 TruncVT, N100),
3150                                     DAG.getConstant(TruncC, TruncVT)));
3151    }
3152  }
3153
3154  // Simplify, based on bits shifted out of the LHS.
3155  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
3156    return SDValue(N, 0);
3157
3158
3159  // If the sign bit is known to be zero, switch this to a SRL.
3160  if (DAG.SignBitIsZero(N0))
3161    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, N1);
3162
3163  if (N1C) {
3164    SDValue NewSRA = visitShiftByConstant(N, N1C->getZExtValue());
3165    if (NewSRA.getNode())
3166      return NewSRA;
3167  }
3168
3169  return SDValue();
3170}
3171
3172SDValue DAGCombiner::visitSRL(SDNode *N) {
3173  SDValue N0 = N->getOperand(0);
3174  SDValue N1 = N->getOperand(1);
3175  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
3176  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3177  EVT VT = N0.getValueType();
3178  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
3179
3180  // fold (srl c1, c2) -> c1 >>u c2
3181  if (N0C && N1C)
3182    return DAG.FoldConstantArithmetic(ISD::SRL, VT, N0C, N1C);
3183  // fold (srl 0, x) -> 0
3184  if (N0C && N0C->isNullValue())
3185    return N0;
3186  // fold (srl x, c >= size(x)) -> undef
3187  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
3188    return DAG.getUNDEF(VT);
3189  // fold (srl x, 0) -> x
3190  if (N1C && N1C->isNullValue())
3191    return N0;
3192  // if (srl x, c) is known to be zero, return 0
3193  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
3194                                   APInt::getAllOnesValue(OpSizeInBits)))
3195    return DAG.getConstant(0, VT);
3196
3197  // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
3198  if (N1C && N0.getOpcode() == ISD::SRL &&
3199      N0.getOperand(1).getOpcode() == ISD::Constant) {
3200    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
3201    uint64_t c2 = N1C->getZExtValue();
3202    if (c1 + c2 >= OpSizeInBits)
3203      return DAG.getConstant(0, VT);
3204    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
3205                       DAG.getConstant(c1 + c2, N1.getValueType()));
3206  }
3207
3208  // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
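  // e.g. with x:i64 truncated to i32 (illustrative):
  //   (srl (trunc (srl x, 32)), 8) -> (trunc (srl x, 40)),
  // which keeps the whole shift computation in the wider type.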
3209  if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
3210      N0.getOperand(0).getOpcode() == ISD::SRL &&
3211      isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
3212    uint64_t c1 =
3213      cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
3214    uint64_t c2 = N1C->getZExtValue();
3215    EVT InnerShiftVT = N0.getOperand(0).getValueType();
3216    EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
3217    uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
3218    // This is only valid if OpSizeInBits + c1 == the size of the inner shift.
3219    if (c1 + OpSizeInBits == InnerShiftSize) {
3220      if (c1 + c2 >= InnerShiftSize)
3221        return DAG.getConstant(0, VT);
3222      return DAG.getNode(ISD::TRUNCATE, N0->getDebugLoc(), VT,
3223                         DAG.getNode(ISD::SRL, N0->getDebugLoc(), InnerShiftVT,
3224                                     N0.getOperand(0)->getOperand(0),
3225                                     DAG.getConstant(c1 + c2, ShiftCountVT)));
3226    }
3227  }
3228
3229  // fold (srl (shl x, c), c) -> (and x, cst2)
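  // e.g. for i32: (srl (shl x, 3), 3) -> (and x, 0x1FFFFFFF).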
3230  if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
3231      N0.getValueSizeInBits() <= 64) {
3232    uint64_t ShAmt = N1C->getZExtValue()+64-N0.getValueSizeInBits();
3233    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
3234                       DAG.getConstant(~0ULL >> ShAmt, VT));
3235  }
3236
3237
3238  // fold (srl (anyextend x), c) -> (anyextend (srl x, c))
3239  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
3240    // Shifting in all undef bits?
3241    EVT SmallVT = N0.getOperand(0).getValueType();
3242    if (N1C->getZExtValue() >= SmallVT.getSizeInBits())
3243      return DAG.getUNDEF(VT);
3244
3245    if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
3246      SDValue SmallShift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), SmallVT,
3247                                       N0.getOperand(0), N1);
3248      AddToWorkList(SmallShift.getNode());
3249      return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, SmallShift);
3250    }
3251  }
3252
3253  // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
3254  // bit, which is unmodified by sra.
3255  if (N1C && N1C->getZExtValue() + 1 == VT.getSizeInBits()) {
3256    if (N0.getOpcode() == ISD::SRA)
3257      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0), N1);
3258  }
3259
3260  // fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
3261  if (N1C && N0.getOpcode() == ISD::CTLZ &&
3262      N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
3263    APInt KnownZero, KnownOne;
3264    APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
3265    DAG.ComputeMaskedBits(N0.getOperand(0), Mask, KnownZero, KnownOne);
3266
3267    // If any of the input bits are KnownOne, then the input couldn't be all
3268    // zeros, thus the result of the srl will always be zero.
3269    if (KnownOne.getBoolValue()) return DAG.getConstant(0, VT);
3270
3271    // If all of the bits input to the ctlz node are known to be zero, then
3272    // the result of the ctlz is "32" and the result of the shift is one.
3273    APInt UnknownBits = ~KnownZero & Mask;
3274    if (UnknownBits == 0) return DAG.getConstant(1, VT);
3275
3276    // Otherwise, check to see if there is exactly one bit input to the ctlz.
3277    if ((UnknownBits & (UnknownBits - 1)) == 0) {
3278      // Okay, we know that only the single bit specified by UnknownBits
3279      // could be set on input to the CTLZ node. If this bit is set, the SRL
3280      // will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
3281      // to an SRL/XOR pair, which is likely to simplify more.
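      // For example, if only bit 4 of the input can be nonzero (illustrative):
      //   (srl (ctlz x), 5) -> (xor (srl x, 4), 1)
      // since ctlz yields 27 (srl -> 0) when the bit is set and 32 (srl -> 1)
      // when it is clear.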
3282      unsigned ShAmt = UnknownBits.countTrailingZeros();
3283      SDValue Op = N0.getOperand(0);
3284
3285      if (ShAmt) {
3286        Op = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT, Op,
3287                         DAG.getConstant(ShAmt, getShiftAmountTy()));
3288        AddToWorkList(Op.getNode());
3289      }
3290
3291      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
3292                         Op, DAG.getConstant(1, VT));
3293    }
3294  }
3295
3296  // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
3297  if (N1.getOpcode() == ISD::TRUNCATE &&
3298      N1.getOperand(0).getOpcode() == ISD::AND &&
3299      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
3300    SDValue N101 = N1.getOperand(0).getOperand(1);
3301    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
3302      EVT TruncVT = N1.getValueType();
3303      SDValue N100 = N1.getOperand(0).getOperand(0);
3304      APInt TruncC = N101C->getAPIntValue();
3305      TruncC = TruncC.trunc(TruncVT.getSizeInBits());
3306      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
3307                         DAG.getNode(ISD::AND, N->getDebugLoc(),
3308                                     TruncVT,
3309                                     DAG.getNode(ISD::TRUNCATE,
3310                                                 N->getDebugLoc(),
3311                                                 TruncVT, N100),
3312                                     DAG.getConstant(TruncC, TruncVT)));
3313    }
3314  }
3315
3316  // fold operands of srl based on knowledge that the low bits are not
3317  // demanded.
3318  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
3319    return SDValue(N, 0);
3320
3321  if (N1C) {
3322    SDValue NewSRL = visitShiftByConstant(N, N1C->getZExtValue());
3323    if (NewSRL.getNode())
3324      return NewSRL;
3325  }
3326
3327  // Attempt to convert a srl of a load into a narrower zero-extending load.
3328  SDValue NarrowLoad = ReduceLoadWidth(N);
3329  if (NarrowLoad.getNode())
3330    return NarrowLoad;
3331
3332  // Here is a common situation. We want to optimize:
3333  //
3334  //   %a = ...
3335  //   %b = and i32 %a, 2
3336  //   %c = srl i32 %b, 1
3337  //   brcond i32 %c ...
3338  //
3339  // into
3340  //
3341  //   %a = ...
3342  //   %b = and %a, 2
3343  //   %c = setcc eq %b, 0
3344  //   brcond %c ...
3345  //
3346  // However, after the source operand of SRL is optimized into AND, the SRL
3347  // itself may not be optimized further. Look for it and add the BRCOND into
3348  // the worklist.
3349  if (N->hasOneUse()) {
3350    SDNode *Use = *N->use_begin();
3351    if (Use->getOpcode() == ISD::BRCOND)
3352      AddToWorkList(Use);
3353    else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
3354      // Also look past the truncate.
3355      Use = *Use->use_begin();
3356      if (Use->getOpcode() == ISD::BRCOND)
3357        AddToWorkList(Use);
3358    }
3359  }
3360
3361  return SDValue();
3362}
3363
3364SDValue DAGCombiner::visitCTLZ(SDNode *N) {
3365  SDValue N0 = N->getOperand(0);
3366  EVT VT = N->getValueType(0);
3367
3368  // fold (ctlz c1) -> c2
3369  if (isa<ConstantSDNode>(N0))
3370    return DAG.getNode(ISD::CTLZ, N->getDebugLoc(), VT, N0);
3371  return SDValue();
3372}
3373
3374SDValue DAGCombiner::visitCTTZ(SDNode *N) {
3375  SDValue N0 = N->getOperand(0);
3376  EVT VT = N->getValueType(0);
3377
3378  // fold (cttz c1) -> c2
3379  if (isa<ConstantSDNode>(N0))
3380    return DAG.getNode(ISD::CTTZ, N->getDebugLoc(), VT, N0);
3381  return SDValue();
3382}
3383
3384SDValue DAGCombiner::visitCTPOP(SDNode *N) {
3385  SDValue N0 = N->getOperand(0);
3386  EVT VT = N->getValueType(0);
3387
3388  // fold (ctpop c1) -> c2
3389  if (isa<ConstantSDNode>(N0))
3390    return DAG.getNode(ISD::CTPOP, N->getDebugLoc(), VT, N0);
3391  return SDValue();
3392}
3393
3394SDValue DAGCombiner::visitSELECT(SDNode *N) {
3395  SDValue N0 = N->getOperand(0);
3396  SDValue N1 = N->getOperand(1);
3397  SDValue N2 = N->getOperand(2);
3398  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
3399  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3400  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
3401  EVT VT = N->getValueType(0);
3402  EVT VT0 = N0.getValueType();
3403
3404  // fold (select C, X, X) -> X
3405  if (N1 == N2)
3406    return N1;
3407  // fold (select true, X, Y) -> X
3408  if (N0C && !N0C->isNullValue())
3409    return N1;
3410  // fold (select false, X, Y) -> Y
3411  if (N0C && N0C->isNullValue())
3412    return N2;
3413  // fold (select C, 1, X) -> (or C, X)
3414  if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
3415    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
3416  // fold (select C, 0, 1) -> (xor C, 1)
3417  if (VT.isInteger() &&
3418      (VT0 == MVT::i1 ||
3419       (VT0.isInteger() &&
3420        TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent)) &&
3421      N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
3422    SDValue XORNode;
3423    if (VT == VT0)
3424      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT0,
3425                         N0, DAG.getConstant(1, VT0));
3426    XORNode = DAG.getNode(ISD::XOR, N0.getDebugLoc(), VT0,
3427                          N0, DAG.getConstant(1, VT0));
3428    AddToWorkList(XORNode.getNode());
3429    if (VT.bitsGT(VT0))
3430      return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, XORNode);
3431    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, XORNode);
3432  }
3433  // fold (select C, 0, X) -> (and (not C), X)
3434  if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) {
3435    SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
3436    AddToWorkList(NOTNode.getNode());
3437    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, NOTNode, N2);
3438  }
3439  // fold (select C, X, 1) -> (or (not C), X)
3440  if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) {
3441    SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
3442    AddToWorkList(NOTNode.getNode());
3443    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, NOTNode, N1);
3444  }
3445  // fold (select C, X, 0) -> (and C, X)
3446  if (VT == MVT::i1 && N2C && N2C->isNullValue())
3447    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);
3448  // fold (select X, X, Y) -> (or X, Y)
3449  // fold (select X, 1, Y) -> (or X, Y)
3450  if (VT == MVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1)))
3451    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
3452  // fold (select X, Y, X) -> (and X, Y)
3453  // fold (select X, Y, 0) -> (and X, Y)
3454  if (VT == MVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0)))
3455    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);
3456
3457  // If we can fold this based on the true/false value, do so.
3458  if (SimplifySelectOps(N, N1, N2))
3459    return SDValue(N, 0);  // Don't revisit N.
3460
3461  // fold selects based on a setcc into other things, such as min/max/abs
3462  if (N0.getOpcode() == ISD::SETCC) {
3463    // FIXME:
3464    // Check against MVT::Other for SELECT_CC, which is a workaround for targets
3465    // having to say they don't support SELECT_CC on every type the DAG knows
3466    // about, since there is no way to mark an opcode illegal at all value types
3467    if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other) &&
3468        TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))
3469      return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT,
3470                         N0.getOperand(0), N0.getOperand(1),
3471                         N1, N2, N0.getOperand(2));
3472    return SimplifySelect(N->getDebugLoc(), N0, N1, N2);
3473  }
3474
3475  return SDValue();
3476}
3477
3478SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
3479  SDValue N0 = N->getOperand(0);
3480  SDValue N1 = N->getOperand(1);
3481  SDValue N2 = N->getOperand(2);
3482  SDValue N3 = N->getOperand(3);
3483  SDValue N4 = N->getOperand(4);
3484  ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
3485
3486  // fold select_cc lhs, rhs, x, x, cc -> x
3487  if (N2 == N3)
3488    return N2;
3489
3490  // Determine if the condition we're dealing with is constant
3491  SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()),
3492                              N0, N1, CC, N->getDebugLoc(), false);
3493  if (SCC.getNode()) AddToWorkList(SCC.getNode());
3494
3495  if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
3496    if (!SCCC->isNullValue())
3497      return N2;    // cond always true -> true val
3498    else
3499      return N3;    // cond always false -> false val
3500  }
3501
3502  // Fold to a simpler select_cc
3503  if (SCC.getNode() && SCC.getOpcode() == ISD::SETCC)
3504    return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N2.getValueType(),
3505                       SCC.getOperand(0), SCC.getOperand(1), N2, N3,
3506                       SCC.getOperand(2));
3507
3508  // If we can fold this based on the true/false value, do so.
3509  if (SimplifySelectOps(N, N2, N3))
3510    return SDValue(N, 0);  // Don't revisit N.
3511
3512  // fold select_cc into other things, such as min/max/abs
3513  return SimplifySelectCC(N->getDebugLoc(), N0, N1, N2, N3, CC);
3514}
3515
3516SDValue DAGCombiner::visitSETCC(SDNode *N) {
3517  return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
3518                       cast<CondCodeSDNode>(N->getOperand(2))->get(),
3519                       N->getDebugLoc());
3520}
3521
3522// ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this:
3523// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
3524// transformation. Returns true if the extension is possible and the
3525// above-mentioned transformation is profitable.
3526static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
3527                                    unsigned ExtOpc,
3528                                    SmallVector<SDNode*, 4> &ExtendNodes,
3529                                    const TargetLowering &TLI) {
3530  bool HasCopyToRegUses = false;
3531  bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
3532  for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
3533                            UE = N0.getNode()->use_end();
3534       UI != UE; ++UI) {
3535    SDNode *User = *UI;
3536    if (User == N)
3537      continue;
3538    if (UI.getUse().getResNo() != N0.getResNo())
3539      continue;
3540    // FIXME: Only extend SETCC N, N and SETCC N, c for now.
3541    if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
3542      ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
3543      if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
3544        // Sign bits will be lost after a zext.
3545        return false;
3546      bool Add = false;
3547      for (unsigned i = 0; i != 2; ++i) {
3548        SDValue UseOp = User->getOperand(i);
3549        if (UseOp == N0)
3550          continue;
3551        if (!isa<ConstantSDNode>(UseOp))
3552          return false;
3553        Add = true;
3554      }
3555      if (Add)
3556        ExtendNodes.push_back(User);
3557      continue;
3558    }
3559    // If truncates aren't free and there are users we can't
3560    // extend, it isn't worthwhile.
3561    if (!isTruncFree)
3562      return false;
3563    // Remember if this value is live-out.
3564    if (User->getOpcode() == ISD::CopyToReg)
3565      HasCopyToRegUses = true;
3566  }
3567
3568  if (HasCopyToRegUses) {
3569    bool BothLiveOut = false;
3570    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
3571         UI != UE; ++UI) {
3572      SDUse &Use = UI.getUse();
3573      if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
3574        BothLiveOut = true;
3575        break;
3576      }
3577    }
3578    if (BothLiveOut)
3579      // Both unextended and extended values are live out. There had better be
3580      // a good reason for the transformation.
3581      return ExtendNodes.size();
3582  }
3583  return true;
3584}
3585
3586SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
3587  SDValue N0 = N->getOperand(0);
3588  EVT VT = N->getValueType(0);
3589
3590  // fold (sext c1) -> c1
3591  if (isa<ConstantSDNode>(N0))
3592    return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N0);
3593
3594  // fold (sext (sext x)) -> (sext x)
3595  // fold (sext (aext x)) -> (sext x)
3596  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
3597    return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT,
3598                       N0.getOperand(0));
3599
3600  if (N0.getOpcode() == ISD::TRUNCATE) {
3601    // fold (sext (truncate (load x))) -> (sext (smaller load x))
3602    // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
3603    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
3604    if (NarrowLoad.getNode()) {
3605      SDNode* oye = N0.getNode()->getOperand(0).getNode();
3606      if (NarrowLoad.getNode() != N0.getNode()) {
3607        CombineTo(N0.getNode(), NarrowLoad);
3608        // CombineTo deleted the truncate, if needed, but not what's under it.
3609        AddToWorkList(oye);
3610      }
3611      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3612    }
3613
3614    // See if the value being truncated is already sign extended.  If so, just
3615    // eliminate the trunc/sext pair.
3616    SDValue Op = N0.getOperand(0);
3617    unsigned OpBits   = Op.getValueType().getScalarType().getSizeInBits();
3618    unsigned MidBits  = N0.getValueType().getScalarType().getSizeInBits();
3619    unsigned DestBits = VT.getScalarType().getSizeInBits();
3620    unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
3621
3622    if (OpBits == DestBits) {
3623      // Op is i32, Mid is i8, and Dest is i32.  If Op has more than 24 sign
3624      // bits, it is already ready.
3625      if (NumSignBits > DestBits-MidBits)
3626        return Op;
3627    } else if (OpBits < DestBits) {
3628      // Op is i32, Mid is i8, and Dest is i64.  If Op has more than 24 sign
3629      // bits, just sext from i32.
3630      if (NumSignBits > OpBits-MidBits)
3631        return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, Op);
3632    } else {
3633      // Op is i64, Mid is i8, and Dest is i32.  If Op has more than 56 sign
3634      // bits, just truncate to i32.
3635      if (NumSignBits > OpBits-MidBits)
3636        return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);
3637    }
3638
3639    // fold (sext (truncate x)) -> (sextinreg x).
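    // e.g. (sext (trunc x:i32 to i8) to i32) -> (sext_inreg x, i8), avoiding
    // the separate truncate.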
3640    if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
3641                                                 N0.getValueType())) {
3642      if (OpBits < DestBits)
3643        Op = DAG.getNode(ISD::ANY_EXTEND, N0.getDebugLoc(), VT, Op);
3644      else if (OpBits > DestBits)
3645        Op = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), VT, Op);
3646      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, Op,
3647                         DAG.getValueType(N0.getValueType()));
3648    }
3649  }
3650
3651  // fold (sext (load x)) -> (sext (truncate (sextload x)))
3652  if (ISD::isNON_EXTLoad(N0.getNode()) &&
3653      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3654       TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) {
3655    bool DoXform = true;
3656    SmallVector<SDNode*, 4> SetCCs;
3657    if (!N0.hasOneUse())
3658      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI);
3659    if (DoXform) {
3660      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3661      SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
3662                                       LN0->getChain(),
3663                                       LN0->getBasePtr(), LN0->getPointerInfo(),
3664                                       N0.getValueType(),
3665                                       LN0->isVolatile(), LN0->isNonTemporal(),
3666                                       LN0->getAlignment());
3667      CombineTo(N, ExtLoad);
3668      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3669                                  N0.getValueType(), ExtLoad);
3670      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
3671
3672      // Extend SetCC uses if necessary.
3673      for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
3674        SDNode *SetCC = SetCCs[i];
3675        SmallVector<SDValue, 4> Ops;
3676
3677        for (unsigned j = 0; j != 2; ++j) {
3678          SDValue SOp = SetCC->getOperand(j);
3679          if (SOp == Trunc)
3680            Ops.push_back(ExtLoad);
3681          else
3682            Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND,
3683                                      N->getDebugLoc(), VT, SOp));
3684        }
3685
3686        Ops.push_back(SetCC->getOperand(2));
3687        CombineTo(SetCC, DAG.getNode(ISD::SETCC, N->getDebugLoc(),
3688                                     SetCC->getValueType(0),
3689                                     &Ops[0], Ops.size()));
3690      }
3691
3692      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3693    }
3694  }
3695
3696  // fold (sext (sextload x)) -> (sext (truncate (sextload x)))
3697  // fold (sext ( extload x)) -> (sext (truncate (sextload x)))
3698  if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
3699      ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
3700    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3701    EVT MemVT = LN0->getMemoryVT();
3702    if ((!LegalOperations && !LN0->isVolatile()) ||
3703        TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) {
3704      SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
3705                                       LN0->getChain(),
3706                                       LN0->getBasePtr(), LN0->getPointerInfo(),
3707                                       MemVT,
3708                                       LN0->isVolatile(), LN0->isNonTemporal(),
3709                                       LN0->getAlignment());
3710      CombineTo(N, ExtLoad);
3711      CombineTo(N0.getNode(),
3712                DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3713                            N0.getValueType(), ExtLoad),
3714                ExtLoad.getValue(1));
3715      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3716    }
3717  }
3718
3719  if (N0.getOpcode() == ISD::SETCC) {
3720    // sext(setcc) -> sext_in_reg(vsetcc) for vectors.
3721    // Only do this before legalize for now.
3722    if (VT.isVector() && !LegalOperations) {
3723      EVT N0VT = N0.getOperand(0).getValueType();
3724      // We know that the # elements of the result is the same as the
3725      // # elements of the compare (and the # elements of the compare result
3726      // for that matter).  Check to see that they are the same size.  If so,
3727      // we know that the element size of the sext'd result matches the
3728      // element size of the compare operands.
3729      if (VT.getSizeInBits() == N0VT.getSizeInBits())
3730        return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
3731                             N0.getOperand(1),
3732                             cast<CondCodeSDNode>(N0.getOperand(2))->get());
3733      // If the desired elements are smaller or larger than the source
3734      // elements we can use a matching integer vector type and then
3735      // truncate/sign extend
3736      else {
3737        EVT MatchingElementType =
3738          EVT::getIntegerVT(*DAG.getContext(),
3739                            N0VT.getScalarType().getSizeInBits());
3740        EVT MatchingVectorType =
3741          EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
3742                           N0VT.getVectorNumElements());
3743        SDValue VsetCC =
3744          DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
3745                        N0.getOperand(1),
3746                        cast<CondCodeSDNode>(N0.getOperand(2))->get());
3747        return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
3748      }
3749    }
3750
3751    // sext(setcc x, y, cc) -> (select_cc x, y, -1, 0, cc)
3752    unsigned ElementWidth = VT.getScalarType().getSizeInBits();
3753    SDValue NegOne =
3754      DAG.getConstant(APInt::getAllOnesValue(ElementWidth), VT);
3755    SDValue SCC =
3756      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
3757                       NegOne, DAG.getConstant(0, VT),
3758                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
3759    if (SCC.getNode()) return SCC;
3760    if (!LegalOperations ||
3761        TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(VT)))
3762      return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
3763                         DAG.getSetCC(N->getDebugLoc(),
3764                                      TLI.getSetCCResultType(VT),
3765                                      N0.getOperand(0), N0.getOperand(1),
3766                                 cast<CondCodeSDNode>(N0.getOperand(2))->get()),
3767                         NegOne, DAG.getConstant(0, VT));
3768  }
3769
3770  // fold (sext x) -> (zext x) if the sign bit is known zero.
3771  if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
3772      DAG.SignBitIsZero(N0))
3773    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0);
3774
3775  return SDValue();
3776}
3777
3778SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
3779  SDValue N0 = N->getOperand(0);
3780  EVT VT = N->getValueType(0);
3781
3782  // fold (zext c1) -> c1
3783  if (isa<ConstantSDNode>(N0))
3784    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0);
3785  // fold (zext (zext x)) -> (zext x)
3786  // fold (zext (aext x)) -> (zext x)
3787  if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
3788    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT,
3789                       N0.getOperand(0));
3790
3791  // fold (zext (truncate (load x))) -> (zext (smaller load x))
3792  // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n)))
3793  if (N0.getOpcode() == ISD::TRUNCATE) {
3794    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
3795    if (NarrowLoad.getNode()) {
3796      SDNode* oye = N0.getNode()->getOperand(0).getNode();
3797      if (NarrowLoad.getNode() != N0.getNode()) {
3798        CombineTo(N0.getNode(), NarrowLoad);
3799        // CombineTo deleted the truncate, if needed, but not what's under it.
3800        AddToWorkList(oye);
3801      }
3802      return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, NarrowLoad);
3803    }
3804  }
3805
3806  // fold (zext (truncate x)) -> (and x, mask)
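  // e.g. (zext (trunc x:i32 to i8) to i32) -> (and x, 255).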
3807  if (N0.getOpcode() == ISD::TRUNCATE &&
3808      (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
3809
3810    // fold (zext (truncate (load x))) -> (zext (smaller load x))
3811    // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
3812    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
3813    if (NarrowLoad.getNode()) {
3814      SDNode* oye = N0.getNode()->getOperand(0).getNode();
3815      if (NarrowLoad.getNode() != N0.getNode()) {
3816        CombineTo(N0.getNode(), NarrowLoad);
3817        // CombineTo deleted the truncate, if needed, but not what's under it.
3818        AddToWorkList(oye);
3819      }
3820      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3821    }
3822
3823    SDValue Op = N0.getOperand(0);
3824    if (Op.getValueType().bitsLT(VT)) {
3825      Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op);
3826    } else if (Op.getValueType().bitsGT(VT)) {
3827      Op = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);
3828    }
3829    return DAG.getZeroExtendInReg(Op, N->getDebugLoc(),
3830                                  N0.getValueType().getScalarType());
3831  }
3832
3833  // Fold (zext (and (trunc x), cst)) -> (and x, cst),
3834  // if either of the casts is not free.
3835  if (N0.getOpcode() == ISD::AND &&
3836      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
3837      N0.getOperand(1).getOpcode() == ISD::Constant &&
3838      (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
3839                           N0.getValueType()) ||
3840       !TLI.isZExtFree(N0.getValueType(), VT))) {
3841    SDValue X = N0.getOperand(0).getOperand(0);
3842    if (X.getValueType().bitsLT(VT)) {
3843      X = DAG.getNode(ISD::ANY_EXTEND, X.getDebugLoc(), VT, X);
3844    } else if (X.getValueType().bitsGT(VT)) {
3845      X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
3846    }
3847    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
3848    Mask = Mask.zext(VT.getSizeInBits());
3849    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3850                       X, DAG.getConstant(Mask, VT));
3851  }
3852
3853  // fold (zext (load x)) -> (zext (truncate (zextload x)))
3854  if (ISD::isNON_EXTLoad(N0.getNode()) &&
3855      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
3856       TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
3857    bool DoXform = true;
3858    SmallVector<SDNode*, 4> SetCCs;
3859    if (!N0.hasOneUse())
3860      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI);
3861    if (DoXform) {
3862      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3863      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
3864                                       LN0->getChain(),
3865                                       LN0->getBasePtr(), LN0->getPointerInfo(),
3866                                       N0.getValueType(),
3867                                       LN0->isVolatile(), LN0->isNonTemporal(),
3868                                       LN0->getAlignment());
3869      CombineTo(N, ExtLoad);
3870      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
3871                                  N0.getValueType(), ExtLoad);
3872      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
3873
3874      // Extend SetCC uses if necessary.
3875      for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
3876        SDNode *SetCC = SetCCs[i];
3877        SmallVector<SDValue, 4> Ops;
3878
3879        for (unsigned j = 0; j != 2; ++j) {
3880          SDValue SOp = SetCC->getOperand(j);
3881          if (SOp == Trunc)
3882            Ops.push_back(ExtLoad);
3883          else
3884            Ops.push_back(DAG.getNode(ISD::ZERO_EXTEND,
3885                                      N->getDebugLoc(), VT, SOp));
3886        }
3887
3888        Ops.push_back(SetCC->getOperand(2));
3889        CombineTo(SetCC, DAG.getNode(ISD::SETCC, N->getDebugLoc(),
3890                                     SetCC->getValueType(0),
3891                                     &Ops[0], Ops.size()));
3892      }
3893
3894      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3895    }
3896  }
3897
3898  // fold (zext (zextload x)) -> (zext (truncate (zextload x)))
3899  // fold (zext ( extload x)) -> (zext (truncate (zextload x)))
3900  if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
3901      ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
3902    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
3903    EVT MemVT = LN0->getMemoryVT();
3904    if ((!LegalOperations && !LN0->isVolatile()) ||
3905        TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) {
3906      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
3907                                       LN0->getChain(),
3908                                       LN0->getBasePtr(), LN0->getPointerInfo(),
3909                                       MemVT,
3910                                       LN0->isVolatile(), LN0->isNonTemporal(),
3911                                       LN0->getAlignment());
3912      CombineTo(N, ExtLoad);
3913      CombineTo(N0.getNode(),
3914                DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), N0.getValueType(),
3915                            ExtLoad),
3916                ExtLoad.getValue(1));
3917      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
3918    }
3919  }
3920
3921  if (N0.getOpcode() == ISD::SETCC) {
3922    if (!LegalOperations && VT.isVector()) {
3923      // zext(setcc) -> (and (vsetcc), (1, 1, ...)) for vectors.
3924      // Only do this before legalize for now.
3925      EVT N0VT = N0.getOperand(0).getValueType();
3926      EVT EltVT = VT.getVectorElementType();
3927      SmallVector<SDValue,8> OneOps(VT.getVectorNumElements(),
3928                                    DAG.getConstant(1, EltVT));
3929      if (VT.getSizeInBits() == N0VT.getSizeInBits()) {
3930        // We know that the # elements of the result is the same as the
3931        // # elements of the compare (and the # elements of the compare result
3932        // for that matter).  Check to see that they are the same size.  If so,
3933        // we know that the element size of the sext'd result matches the
3934        // element size of the compare operands.
3935        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3936                           DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
3937                                         N0.getOperand(1),
3938                                 cast<CondCodeSDNode>(N0.getOperand(2))->get()),
3939                           DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
3940                                       &OneOps[0], OneOps.size()));
3941      } else {
3942        // If the desired elements are smaller or larger than the source
3943        // elements, we can use a matching integer vector type and then
3944        // truncate/sign-extend the result.
3945        EVT MatchingElementType =
3946          EVT::getIntegerVT(*DAG.getContext(),
3947                            N0VT.getScalarType().getSizeInBits());
3948        EVT MatchingVectorType =
3949          EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
3950                           N0VT.getVectorNumElements());
3951        SDValue VsetCC =
3952          DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
3953                        N0.getOperand(1),
3954                        cast<CondCodeSDNode>(N0.getOperand(2))->get());
3955        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3956                           DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT),
3957                           DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
3958                                       &OneOps[0], OneOps.size()));
3959      }
3960    }
3961
3962    // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
3963    SDValue SCC =
3964      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
3965                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
3966                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
3967    if (SCC.getNode()) return SCC;
3968  }
3969
3970  // (zext (shl/srl (zext x), cst)) -> (shl/srl (zext x), cst)
3971  if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) &&
3972      isa<ConstantSDNode>(N0.getOperand(1)) &&
3973      N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
3974      N0.hasOneUse()) {
3975    if (N0.getOpcode() == ISD::SHL) {
3976      // If the original shl may be shifting out bits, do not perform this
3977      // transformation.
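      // For example, with x: i8 zero-extended to i16, KnownZeroBits is 8; shifting
      // left by more than 8 in i16 can discard bits of x that a shift performed in
      // the wider VT would keep, so the two forms would no longer be equivalent.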
3978      unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
3979      unsigned KnownZeroBits = N0.getOperand(0).getValueType().getSizeInBits() -
3980        N0.getOperand(0).getOperand(0).getValueType().getSizeInBits();
3981      if (ShAmt > KnownZeroBits)
3982        return SDValue();
3983    }
3984    DebugLoc dl = N->getDebugLoc();
3985    return DAG.getNode(N0.getOpcode(), dl, VT,
3986                       DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0.getOperand(0)),
3987                       DAG.getNode(ISD::ZERO_EXTEND, dl,
3988                                   N0.getOperand(1).getValueType(),
3989                                   N0.getOperand(1)));
3990  }
3991
3992  return SDValue();
3993}
3994
3995SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
3996  SDValue N0 = N->getOperand(0);
3997  EVT VT = N->getValueType(0);
3998
3999  // fold (aext c1) -> c1
4000  if (isa<ConstantSDNode>(N0))
4001    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, N0);
4002  // fold (aext (aext x)) -> (aext x)
4003  // fold (aext (zext x)) -> (zext x)
4004  // fold (aext (sext x)) -> (sext x)
4005  if (N0.getOpcode() == ISD::ANY_EXTEND  ||
4006      N0.getOpcode() == ISD::ZERO_EXTEND ||
4007      N0.getOpcode() == ISD::SIGN_EXTEND)
4008    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, N0.getOperand(0));
4009
4010  // fold (aext (truncate (load x))) -> (aext (smaller load x))
4011  // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
4012  if (N0.getOpcode() == ISD::TRUNCATE) {
4013    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
4014    if (NarrowLoad.getNode()) {
4015      SDNode* oye = N0.getNode()->getOperand(0).getNode();
4016      if (NarrowLoad.getNode() != N0.getNode()) {
4017        CombineTo(N0.getNode(), NarrowLoad);
4018        // CombineTo deleted the truncate, if needed, but not what's under it.
4019        AddToWorkList(oye);
4020      }
4021      return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, NarrowLoad);
4022    }
4023  }
4024
4025  // fold (aext (truncate x))
4026  if (N0.getOpcode() == ISD::TRUNCATE) {
4027    SDValue TruncOp = N0.getOperand(0);
4028    if (TruncOp.getValueType() == VT)
4029      return TruncOp; // x iff x size == zext size.
4030    if (TruncOp.getValueType().bitsGT(VT))
4031      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, TruncOp);
4032    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, TruncOp);
4033  }
4034
4035  // Fold (aext (and (trunc x), cst)) -> (and x, cst)
4036  // if the trunc is not free.
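  // For example, with x: i64 and an i32 truncate that is not free,
  // (aext i64 (and (trunc x), 255)) becomes (and x, 255); the constant is
  // zero-extended to the wider type below.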
4037  if (N0.getOpcode() == ISD::AND &&
4038      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
4039      N0.getOperand(1).getOpcode() == ISD::Constant &&
4040      !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
4041                          N0.getValueType())) {
4042    SDValue X = N0.getOperand(0).getOperand(0);
4043    if (X.getValueType().bitsLT(VT)) {
4044      X = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, X);
4045    } else if (X.getValueType().bitsGT(VT)) {
4046      X = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, X);
4047    }
4048    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
4049    Mask = Mask.zext(VT.getSizeInBits());
4050    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
4051                       X, DAG.getConstant(Mask, VT));
4052  }
4053
4054  // fold (aext (load x)) -> (aext (truncate (extload x)))
4055  if (ISD::isNON_EXTLoad(N0.getNode()) &&
4056      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
4057       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
4058    bool DoXform = true;
4059    SmallVector<SDNode*, 4> SetCCs;
4060    if (!N0.hasOneUse())
4061      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
4062    if (DoXform) {
4063      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4064      SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
4065                                       LN0->getChain(),
4066                                       LN0->getBasePtr(), LN0->getPointerInfo(),
4067                                       N0.getValueType(),
4068                                       LN0->isVolatile(), LN0->isNonTemporal(),
4069                                       LN0->getAlignment());
4070      CombineTo(N, ExtLoad);
4071      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
4072                                  N0.getValueType(), ExtLoad);
4073      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
4074
4075      // Extend SetCC uses if necessary.
4076      for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
4077        SDNode *SetCC = SetCCs[i];
4078        SmallVector<SDValue, 4> Ops;
4079
4080        for (unsigned j = 0; j != 2; ++j) {
4081          SDValue SOp = SetCC->getOperand(j);
4082          if (SOp == Trunc)
4083            Ops.push_back(ExtLoad);
4084          else
4085            Ops.push_back(DAG.getNode(ISD::ANY_EXTEND,
4086                                      N->getDebugLoc(), VT, SOp));
4087        }
4088
4089        Ops.push_back(SetCC->getOperand(2));
4090        CombineTo(SetCC, DAG.getNode(ISD::SETCC, N->getDebugLoc(),
4091                                     SetCC->getValueType(0),
4092                                     &Ops[0], Ops.size()));
4093      }
4094
4095      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4096    }
4097  }
4098
4099  // fold (aext (zextload x)) -> (aext (truncate (zextload x)))
4100  // fold (aext (sextload x)) -> (aext (truncate (sextload x)))
4101  // fold (aext ( extload x)) -> (aext (truncate (extload  x)))
4102  if (N0.getOpcode() == ISD::LOAD &&
4103      !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
4104      N0.hasOneUse()) {
4105    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4106    EVT MemVT = LN0->getMemoryVT();
4107    SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT,
4108                                     N->getDebugLoc(),
4109                                     LN0->getChain(), LN0->getBasePtr(),
4110                                     LN0->getPointerInfo(), MemVT,
4111                                     LN0->isVolatile(), LN0->isNonTemporal(),
4112                                     LN0->getAlignment());
4113    CombineTo(N, ExtLoad);
4114    CombineTo(N0.getNode(),
4115              DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
4116                          N0.getValueType(), ExtLoad),
4117              ExtLoad.getValue(1));
4118    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4119  }
4120
4121  if (N0.getOpcode() == ISD::SETCC) {
4122    // aext(setcc) -> vsetcc for vectors.
4123    // Only do this before legalize for now.
4124    if (VT.isVector() && !LegalOperations) {
4125      EVT N0VT = N0.getOperand(0).getValueType();
4126      // We know that the # elements of the result is the same as the
4127      // # elements of the compare (and the # elements of the compare result
4128      // for that matter).  Check to see that they are the same size.  If so,
4129      // we know that the element size of the sext'd result matches the
4130      // element size of the compare operands.
4131      if (VT.getSizeInBits() == N0VT.getSizeInBits())
4132        return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
4133                             N0.getOperand(1),
4134                             cast<CondCodeSDNode>(N0.getOperand(2))->get());
4135      // If the desired elements are smaller or larger than the source
4136      // elements, we can use a matching integer vector type and then
4137      // truncate/sign-extend the result.
4138      else {
4139        EVT MatchingElementType =
4140          EVT::getIntegerVT(*DAG.getContext(),
4141                            N0VT.getScalarType().getSizeInBits());
4142        EVT MatchingVectorType =
4143          EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
4144                           N0VT.getVectorNumElements());
4145        SDValue VsetCC =
4146          DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
4147                        N0.getOperand(1),
4148                        cast<CondCodeSDNode>(N0.getOperand(2))->get());
4149        return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
4150      }
4151    }
4152
4153    // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
4154    SDValue SCC =
4155      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
4156                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
4157                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
4158    if (SCC.getNode())
4159      return SCC;
4160  }
4161
4162  return SDValue();
4163}
4164
4165/// GetDemandedBits - See if the specified operand can be simplified with the
4166/// knowledge that only the bits specified by Mask are used.  If so, return the
4167/// simpler operand, otherwise return a null SDValue.
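/// For example, if Mask covers only the low 8 bits and V is (or x, (shl y, 8)),
/// the shl contributes nothing to the demanded bits, so x is returned.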
4168SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
4169  switch (V.getOpcode()) {
4170  default: break;
4171  case ISD::OR:
4172  case ISD::XOR:
4173    // If the LHS or RHS doesn't contribute bits to the or/xor, drop it.
4174    if (DAG.MaskedValueIsZero(V.getOperand(0), Mask))
4175      return V.getOperand(1);
4176    if (DAG.MaskedValueIsZero(V.getOperand(1), Mask))
4177      return V.getOperand(0);
4178    break;
4179  case ISD::SRL:
4180    // Only look at single-use SRLs.
4181    if (!V.getNode()->hasOneUse())
4182      break;
4183    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
4184      // See if we can recursively simplify the LHS.
4185      unsigned Amt = RHSC->getZExtValue();
4186
4187      // Watch out for shift count overflow though.
4188      if (Amt >= Mask.getBitWidth()) break;
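      // Bit i of the SRL result is bit i+Amt of its input, so the bits demanded
      // from the input are the demanded bits of the result shifted left by Amt.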
4189      APInt NewMask = Mask << Amt;
4190      SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask);
4191      if (SimplifyLHS.getNode())
4192        return DAG.getNode(ISD::SRL, V.getDebugLoc(), V.getValueType(),
4193                           SimplifyLHS, V.getOperand(1));
4194    }
4195  }
4196  return SDValue();
4197}
4198
4199/// ReduceLoadWidth - If the result of a wider load is shifted right by N
4200/// bits and then truncated to a narrower type, where N is a multiple of
4201/// the number of bits of the narrower type, transform it to a narrower load
4202/// from address + N / (number of bits of the new type). If the result is to
4203/// be extended, also fold the extension to form an extending load.
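/// For example, on a little-endian target, (truncate i8 (srl (load i32 x), 16))
/// can become an i8 load from address x+2, since the shift amount 16 is a
/// multiple of the 8-bit result width.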
4204SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
4205  unsigned Opc = N->getOpcode();
4206
4207  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
4208  SDValue N0 = N->getOperand(0);
4209  EVT VT = N->getValueType(0);
4210  EVT ExtVT = VT;
4211
4212  // This transformation isn't valid for vector loads.
4213  if (VT.isVector())
4214    return SDValue();
4215
4216  // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
4217  // extended to VT.
4218  if (Opc == ISD::SIGN_EXTEND_INREG) {
4219    ExtType = ISD::SEXTLOAD;
4220    ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
4221    if (LegalOperations && !TLI.isLoadExtLegal(ISD::SEXTLOAD, ExtVT))
4222      return SDValue();
4223  } else if (Opc == ISD::SRL) {
4224    // Another special-case: SRL is basically zero-extending a narrower value.
4225    ExtType = ISD::ZEXTLOAD;
4226    N0 = SDValue(N, 0);
4227    ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4228    if (!N01) return SDValue();
4229    ExtVT = EVT::getIntegerVT(*DAG.getContext(),
4230                              VT.getSizeInBits() - N01->getZExtValue());
4231  }
4232
4233  unsigned EVTBits = ExtVT.getSizeInBits();
4234
4235  // Do not generate loads of non-round integer types since these can
4236  // be expensive (and would be wrong if the type is not byte sized).
4237  if (!ExtVT.isRound())
4238    return SDValue();
4239
4240  unsigned ShAmt = 0;
4241  if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
4242    if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
4243      ShAmt = N01->getZExtValue();
4244      // Is the shift amount a multiple of the size of ExtVT?
4245      if ((ShAmt & (EVTBits-1)) == 0) {
4246        N0 = N0.getOperand(0);
4247        // Is the load width a multiple of the size of ExtVT?
4248        if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
4249          return SDValue();
4250      }
4251
4252      // At this point, we must have a load or else we can't do the transform.
4253      if (!isa<LoadSDNode>(N0)) return SDValue();
4254
4255      // If the shift amount is larger than the input type then we're not
4256      // accessing any of the loaded bytes.  If the load was a zextload/extload
4257      // then the result of the shift+trunc is zero/undef (handled elsewhere).
4258      // If the load was a sextload then the result is a splat of the sign bit
4259      // of the extended byte.  This is not worth optimizing for.
4260      if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits())
4261        return SDValue();
4262    }
4263  }
4264
4265  // If the load is shifted left (and the result isn't shifted back right),
4266  // we can fold the truncate through the shift.
4267  unsigned ShLeftAmt = 0;
4268  if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
4269      ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
4270    if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
4271      ShLeftAmt = N01->getZExtValue();
4272      N0 = N0.getOperand(0);
4273    }
4274  }
4275
4276  // If we haven't found a load, we can't narrow it.  Don't transform one with
4277  // multiple uses; this would require adding a new load.
4278  if (!isa<LoadSDNode>(N0) || !N0.hasOneUse() ||
4279      // Don't change the width of a volatile load.
4280      cast<LoadSDNode>(N0)->isVolatile())
4281    return SDValue();
4282
4283  // Verify that we are actually reducing a load width here.
4284  if (cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() < EVTBits)
4285    return SDValue();
4286
4287  LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4288  EVT PtrType = N0.getOperand(1).getValueType();
4289
4290  // For big endian targets, we need to adjust the offset to the pointer to
4291  // load the correct bytes.
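  // For example, narrowing a 4-byte big-endian load to its least significant
  // byte (ShAmt == 0) must load from offset 3 rather than offset 0.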
4292  if (TLI.isBigEndian()) {
4293    unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
4294    unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
4295    ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
4296  }
4297
4298  uint64_t PtrOff = ShAmt / 8;
4299  unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
4300  SDValue NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(),
4301                               PtrType, LN0->getBasePtr(),
4302                               DAG.getConstant(PtrOff, PtrType));
4303  AddToWorkList(NewPtr.getNode());
4304
4305  SDValue Load;
4306  if (ExtType == ISD::NON_EXTLOAD)
4307    Load =  DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
4308                        LN0->getPointerInfo().getWithOffset(PtrOff),
4309                        LN0->isVolatile(), LN0->isNonTemporal(), NewAlign);
4310  else
4311    Load = DAG.getExtLoad(ExtType, VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
4312                          LN0->getPointerInfo().getWithOffset(PtrOff),
4313                          ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
4314                          NewAlign);
4315
4316  // Replace the old load's chain with the new load's chain.
4317  WorkListRemover DeadNodes(*this);
4318  DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1),
4319                                &DeadNodes);
4320
4321  // Shift the result left, if we've swallowed a left shift.
4322  SDValue Result = Load;
4323  if (ShLeftAmt != 0) {
4324    EVT ShImmTy = getShiftAmountTy();
4325    if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt))
4326      ShImmTy = VT;
4327    Result = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT,
4328                         Result, DAG.getConstant(ShLeftAmt, ShImmTy));
4329  }
4330
4331  // Return the new loaded value.
4332  return Result;
4333}
4334
4335SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
4336  SDValue N0 = N->getOperand(0);
4337  SDValue N1 = N->getOperand(1);
4338  EVT VT = N->getValueType(0);
4339  EVT EVT = cast<VTSDNode>(N1)->getVT();
4340  unsigned VTBits = VT.getScalarType().getSizeInBits();
4341  unsigned EVTBits = EVT.getScalarType().getSizeInBits();
4342
4343  // fold (sext_in_reg c1) -> c1
4344  if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF)
4345    return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, N0, N1);
4346
4347  // If the input is already sign extended, just drop the extension.
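  // For example, (sext_in_reg (sra x, 24), i8) on i32: the sra already provides
  // at least 25 sign bits, which is VTBits-EVTBits+1, so the node is a no-op.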
4348  if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1)
4349    return N0;
4350
4351  // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
4352  if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4353      EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) {
4354    return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
4355                       N0.getOperand(0), N1);
4356  }
4357
4358  // fold (sext_in_reg (sext x)) -> (sext x)
4359  // fold (sext_in_reg (aext x)) -> (sext x)
4360  // if x is small enough.
4361  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
4362    SDValue N00 = N0.getOperand(0);
4363    if (N00.getValueType().getScalarType().getSizeInBits() <= EVTBits &&
4364        (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
4365      return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N00, N1);
4366  }
4367
4368  // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
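  // The bit being copied by the sign extension is bit EVTBits-1, which is the
  // single bit selected by getBitsSet(VTBits, EVTBits-1, EVTBits) below.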
4369  if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits)))
4370    return DAG.getZeroExtendInReg(N0, N->getDebugLoc(), EVT);
4371
4372  // fold operands of sext_in_reg based on knowledge that the top bits are not
4373  // demanded.
4374  if (SimplifyDemandedBits(SDValue(N, 0)))
4375    return SDValue(N, 0);
4376
4377  // fold (sext_in_reg (load x)) -> (smaller sextload x)
4378  // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
4379  SDValue NarrowLoad = ReduceLoadWidth(N);
4380  if (NarrowLoad.getNode())
4381    return NarrowLoad;
4382
4383  // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
4384  // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
4385  // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
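  // In the 23-bit case the replacement is only valid when X has at least two
  // sign bits: bit 7 of the shifted result is X's bit 30, and the sra fills the
  // upper bits with X's bit 31, so the two must already agree.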
4386  if (N0.getOpcode() == ISD::SRL) {
4387    if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
4388      if (ShAmt->getZExtValue()+EVTBits <= VTBits) {
4389        // We can turn this into an SRA iff the input to the SRL is already sign
4390        // extended enough.
4391        unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
4392        if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits)
4393          return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT,
4394                             N0.getOperand(0), N0.getOperand(1));
4395      }
4396  }
4397
4398  // fold (sext_in_reg (extload x)) -> (sextload x)
4399  if (ISD::isEXTLoad(N0.getNode()) &&
4400      ISD::isUNINDEXEDLoad(N0.getNode()) &&
4401      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
4402      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
4403       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
4404    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4405    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
4406                                     LN0->getChain(),
4407                                     LN0->getBasePtr(), LN0->getPointerInfo(),
4408                                     EVT,
4409                                     LN0->isVolatile(), LN0->isNonTemporal(),
4410                                     LN0->getAlignment());
4411    CombineTo(N, ExtLoad);
4412    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
4413    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4414  }
4415  // fold (sext_in_reg (zextload x)) -> (sextload x) iff load has one use
4416  if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
4417      N0.hasOneUse() &&
4418      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
4419      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
4420       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
4421    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4422    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
4423                                     LN0->getChain(),
4424                                     LN0->getBasePtr(), LN0->getPointerInfo(),
4425                                     EVT,
4426                                     LN0->isVolatile(), LN0->isNonTemporal(),
4427                                     LN0->getAlignment());
4428    CombineTo(N, ExtLoad);
4429    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
4430    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
4431  }
4432  return SDValue();
4433}
4434
4435SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
4436  SDValue N0 = N->getOperand(0);
4437  EVT VT = N->getValueType(0);
4438
4439  // noop truncate
4440  if (N0.getValueType() == N->getValueType(0))
4441    return N0;
4442  // fold (truncate c1) -> c1
4443  if (isa<ConstantSDNode>(N0))
4444    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0);
4445  // fold (truncate (truncate x)) -> (truncate x)
4446  if (N0.getOpcode() == ISD::TRUNCATE)
4447    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
4448  // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
4449  if (N0.getOpcode() == ISD::ZERO_EXTEND ||
4450      N0.getOpcode() == ISD::SIGN_EXTEND ||
4451      N0.getOpcode() == ISD::ANY_EXTEND) {
4452    if (N0.getOperand(0).getValueType().bitsLT(VT))
4453      // if the source is smaller than the dest, we still need an extend
4454      return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
4455                         N0.getOperand(0));
4456    else if (N0.getOperand(0).getValueType().bitsGT(VT))
4457      // if the source is larger than the dest, then we just need the truncate
4458      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
4459    else
4460      // if the source and dest are the same type, we can drop both the extend
4461      // and the truncate.
4462      return N0.getOperand(0);
4463  }
4464
4465  // See if we can simplify the input to this truncate through knowledge that
4466  // only the low bits are being used.  For example "trunc (or (shl x, 8), y)"
4467  // -> trunc y
4468  SDValue Shorter =
4469    GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
4470                                             VT.getSizeInBits()));
4471  if (Shorter.getNode())
4472    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Shorter);
4473
4474  // fold (truncate (load x)) -> (smaller load x)
4475  // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
4476  if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
4477    SDValue Reduced = ReduceLoadWidth(N);
4478    if (Reduced.getNode())
4479      return Reduced;
4480  }
4481
4482  // Simplify the operands using demanded-bits information.
4483  if (!VT.isVector() &&
4484      SimplifyDemandedBits(SDValue(N, 0)))
4485    return SDValue(N, 0);
4486
4487  return SDValue();
4488}
4489
4490static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
4491  SDValue Elt = N->getOperand(i);
4492  if (Elt.getOpcode() != ISD::MERGE_VALUES)
4493    return Elt.getNode();
4494  return Elt.getOperand(Elt.getResNo()).getNode();
4495}
4496
4497/// CombineConsecutiveLoads - build_pair (load, load) -> load
4498/// if load locations are consecutive.
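/// For example, on a little-endian target a BUILD_PAIR of (load i32 x) and
/// (load i32 x+4) can become a single (load i64 x), provided the wider load
/// does not require a higher alignment than the original.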
4499SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
4500  assert(N->getOpcode() == ISD::BUILD_PAIR);
4501
4502  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
4503  LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
4504  if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
4505      LD1->getPointerInfo().getAddrSpace() !=
4506         LD2->getPointerInfo().getAddrSpace())
4507    return SDValue();
4508  EVT LD1VT = LD1->getValueType(0);
4509
4510  if (ISD::isNON_EXTLoad(LD2) &&
4511      LD2->hasOneUse() &&
4512      // If both are volatile, this would reduce the number of volatile loads.
4513      // If one is volatile, it might be OK, but be conservative and bail out.
4514      !LD1->isVolatile() &&
4515      !LD2->isVolatile() &&
4516      DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
4517    unsigned Align = LD1->getAlignment();
4518    unsigned NewAlign = TLI.getTargetData()->
4519      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
4520
4521    if (NewAlign <= Align &&
4522        (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
4523      return DAG.getLoad(VT, N->getDebugLoc(), LD1->getChain(),
4524                         LD1->getBasePtr(), LD1->getPointerInfo(),
4525                         false, false, Align);
4526  }
4527
4528  return SDValue();
4529}
4530
4531SDValue DAGCombiner::visitBITCAST(SDNode *N) {
4532  SDValue N0 = N->getOperand(0);
4533  EVT VT = N->getValueType(0);
4534
4535  // If the input is a BUILD_VECTOR with all constant elements, fold this now.
4536  // Only do this before legalize, since afterward the target may be depending
4537  // on the bitconvert.
4538  // First check to see if this is all constant.
4539  if (!LegalTypes &&
4540      N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
4541      VT.isVector()) {
4542    bool isSimple = true;
4543    for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i)
4544      if (N0.getOperand(i).getOpcode() != ISD::UNDEF &&
4545          N0.getOperand(i).getOpcode() != ISD::Constant &&
4546          N0.getOperand(i).getOpcode() != ISD::ConstantFP) {
4547        isSimple = false;
4548        break;
4549      }
4550
4551    EVT DestEltVT = N->getValueType(0).getVectorElementType();
4552    assert(!DestEltVT.isVector() &&
4553           "Element type of vector ValueType must not be vector!");
4554    if (isSimple)
4555      return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
4556  }
4557
4558  // If the input is a constant, let getNode fold it.
4559  if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
4560    SDValue Res = DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, N0);
4561    if (Res.getNode() != N) {
4562      if (!LegalOperations ||
4563          TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
4564        return Res;
4565
4566      // Folding it resulted in an illegal node, and it's too late to
4567      // do that. Clean up the old node and forego the transformation.
4568      // Ideally this won't happen very often, because instcombine
4569      // and the earlier dagcombine runs (where illegal nodes are
4570      // permitted) should have folded most of them already.
4571      DAG.DeleteNode(Res.getNode());
4572    }
4573  }
4574
4575  // (conv (conv x, t1), t2) -> (conv x, t2)
4576  if (N0.getOpcode() == ISD::BITCAST)
4577    return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT,
4578                       N0.getOperand(0));
4579
4580  // fold (conv (load x)) -> (load (conv*)x)
4581  // If the resultant load doesn't need a higher alignment than the original!
4582  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
4583      // Do not change the width of a volatile load.
4584      !cast<LoadSDNode>(N0)->isVolatile() &&
4585      (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) {
4586    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4587    unsigned Align = TLI.getTargetData()->
4588      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
4589    unsigned OrigAlign = LN0->getAlignment();
4590
4591    if (Align <= OrigAlign) {
4592      SDValue Load = DAG.getLoad(VT, N->getDebugLoc(), LN0->getChain(),
4593                                 LN0->getBasePtr(), LN0->getPointerInfo(),
4594                                 LN0->isVolatile(), LN0->isNonTemporal(),
4595                                 OrigAlign);
4596      AddToWorkList(N);
4597      CombineTo(N0.getNode(),
4598                DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
4599                            N0.getValueType(), Load),
4600                Load.getValue(1));
4601      return Load;
4602    }
4603  }
4604
4605  // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
4606  // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
4607  // This often reduces constant pool loads.
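  // For example, (bitconvert i32 (fneg f32 x)) becomes
  // (xor (bitconvert i32 x), 0x80000000), and fabs uses the complemented mask.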
4608  if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) &&
4609      N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) {
4610    SDValue NewConv = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(), VT,
4611                                  N0.getOperand(0));
4612    AddToWorkList(NewConv.getNode());
4613
4614    APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
4615    if (N0.getOpcode() == ISD::FNEG)
4616      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
4617                         NewConv, DAG.getConstant(SignBit, VT));
4618    assert(N0.getOpcode() == ISD::FABS);
4619    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
4620                       NewConv, DAG.getConstant(~SignBit, VT));
4621  }
4622
4623  // fold (bitconvert (fcopysign cst, x)) ->
4624  //         (or (and (bitconvert x), sign), (and cst, (not sign)))
4625  // Note that we don't handle (copysign x, cst) because this can always be
4626  // folded to an fneg or fabs.
4627  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
4628      isa<ConstantFPSDNode>(N0.getOperand(0)) &&
4629      VT.isInteger() && !VT.isVector()) {
4630    unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
4631    EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
4632    if (isTypeLegal(IntXVT)) {
4633      SDValue X = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
4634                              IntXVT, N0.getOperand(1));
4635      AddToWorkList(X.getNode());
4636
4637      // If X has a different width than the result/lhs, sext it or truncate it.
4638      unsigned VTWidth = VT.getSizeInBits();
4639      if (OrigXWidth < VTWidth) {
4640        X = DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, X);
4641        AddToWorkList(X.getNode());
4642      } else if (OrigXWidth > VTWidth) {
4643        // To get the sign bit in the right place, we have to shift it right
4644        // before truncating.
4645        X = DAG.getNode(ISD::SRL, X.getDebugLoc(),
4646                        X.getValueType(), X,
4647                        DAG.getConstant(OrigXWidth-VTWidth, X.getValueType()));
4648        AddToWorkList(X.getNode());
4649        X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
4650        AddToWorkList(X.getNode());
4651      }
4652
4653      APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
4654      X = DAG.getNode(ISD::AND, X.getDebugLoc(), VT,
4655                      X, DAG.getConstant(SignBit, VT));
4656      AddToWorkList(X.getNode());
4657
4658      SDValue Cst = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
4659                                VT, N0.getOperand(0));
4660      Cst = DAG.getNode(ISD::AND, Cst.getDebugLoc(), VT,
4661                        Cst, DAG.getConstant(~SignBit, VT));
4662      AddToWorkList(Cst.getNode());
4663
4664      return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, X, Cst);
4665    }
4666  }
4667
4668  // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
4669  if (N0.getOpcode() == ISD::BUILD_PAIR) {
4670    SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
4671    if (CombineLD.getNode())
4672      return CombineLD;
4673  }
4674
4675  return SDValue();
4676}
4677
4678SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
4679  EVT VT = N->getValueType(0);
4680  return CombineConsecutiveLoads(N, VT);
4681}
4682
4683/// ConstantFoldBITCASTofBUILD_VECTOR - We know that BV is a build_vector
4684/// node with Constant, ConstantFP or Undef operands.  DstEltVT indicates the
4685/// destination element value type.
4686SDValue DAGCombiner::
4687ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
4688  EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
4689
4690  // If this is already the right type, we're done.
4691  if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
4692
4693  unsigned SrcBitSize = SrcEltVT.getSizeInBits();
4694  unsigned DstBitSize = DstEltVT.getSizeInBits();
4695
4696  // If this is a conversion of N elements of one type to N elements of another
4697  // type, convert each element.  This handles FP<->INT cases.
4698  if (SrcBitSize == DstBitSize) {
4699    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
4700                              BV->getValueType(0).getVectorNumElements());
4701
4702    // Due to the FP element handling below calling this routine recursively,
4703    // we can end up with a scalar-to-vector node here.
4704    if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
4705      return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
4706                         DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
4707                                     DstEltVT, BV->getOperand(0)));
4708
4709    SmallVector<SDValue, 8> Ops;
4710    for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
4711      SDValue Op = BV->getOperand(i);
4712      // If the vector element type is not legal, the BUILD_VECTOR operands
4713      // are promoted and implicitly truncated.  Make that explicit here.
4714      if (Op.getValueType() != SrcEltVT)
4715        Op = DAG.getNode(ISD::TRUNCATE, BV->getDebugLoc(), SrcEltVT, Op);
4716      Ops.push_back(DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
4717                                DstEltVT, Op));
4718      AddToWorkList(Ops.back().getNode());
4719    }
4720    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
4721                       &Ops[0], Ops.size());
4722  }
4723
4724  // Otherwise, we're growing or shrinking the elements.  To avoid having to
4725  // handle annoying details of growing/shrinking FP values, we convert them to
4726  // int first.
4727  if (SrcEltVT.isFloatingPoint()) {
4728    // Convert the input float vector to an int vector where the elements are
4729    // the same size.
4730    assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
4731    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
4732    BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
4733    SrcEltVT = IntVT;
4734  }
4735
4736  // Now we know the input is an integer vector.  If the output is a FP type,
4737  // convert to integer first, then to FP of the right size.
4738  if (DstEltVT.isFloatingPoint()) {
4739    assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
4740    EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
4741    SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();
4742
4743    // Next, convert to FP elements of the same size.
4744    return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
4745  }
4746
4747  // Okay, we know the src/dst types are both integers of differing types.
4748  // Handling growing first.
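  // For example, going from <4 x i16> to <2 x i32>, each pair of i16 constants
  // is packed into one i32, with the pair order chosen by endianness.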
4749  assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
4750  if (SrcBitSize < DstBitSize) {
4751    unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
4752
4753    SmallVector<SDValue, 8> Ops;
4754    for (unsigned i = 0, e = BV->getNumOperands(); i != e;
4755         i += NumInputsPerOutput) {
4756      bool isLE = TLI.isLittleEndian();
4757      APInt NewBits = APInt(DstBitSize, 0);
4758      bool EltIsUndef = true;
4759      for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
4760        // Shift the previously computed bits over.
4761        NewBits <<= SrcBitSize;
4762        SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
4763        if (Op.getOpcode() == ISD::UNDEF) continue;
4764        EltIsUndef = false;
4765
4766        NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
4767                   zextOrTrunc(SrcBitSize).zext(DstBitSize);
4768      }
4769
4770      if (EltIsUndef)
4771        Ops.push_back(DAG.getUNDEF(DstEltVT));
4772      else
4773        Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
4774    }
4775
4776    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
4777    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
4778                       &Ops[0], Ops.size());
4779  }
4780
4781  // Finally, this must be the case where we are shrinking elements: each input
4782  // turns into multiple outputs.
4783  bool isS2V = ISD::isScalarToVector(BV);
4784  unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
4785  EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
4786                            NumOutputsPerInput*BV->getNumOperands());
4787  SmallVector<SDValue, 8> Ops;
4788
4789  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
4790    if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
4791      for (unsigned j = 0; j != NumOutputsPerInput; ++j)
4792        Ops.push_back(DAG.getUNDEF(DstEltVT));
4793      continue;
4794    }
4795
4796    APInt OpVal = cast<ConstantSDNode>(BV->getOperand(i))->
4797                  getAPIntValue().zextOrTrunc(SrcBitSize);
4798
4799    for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
4800      APInt ThisVal = OpVal.trunc(DstBitSize);
4801      Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
4802      if (isS2V && i == 0 && j == 0 && ThisVal.zext(SrcBitSize) == OpVal)
4803        // Simply turn this into a SCALAR_TO_VECTOR of the new type.
4804        return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
4805                           Ops[0]);
4806      OpVal = OpVal.lshr(DstBitSize);
4807    }
4808
4809    // For big endian targets, swap the order of the pieces of each element.
4810    if (TLI.isBigEndian())
4811      std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
4812  }
4813
4814  return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
4815                     &Ops[0], Ops.size());
4816}
4817
4818SDValue DAGCombiner::visitFADD(SDNode *N) {
4819  SDValue N0 = N->getOperand(0);
4820  SDValue N1 = N->getOperand(1);
4821  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4822  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4823  EVT VT = N->getValueType(0);
4824
4825  // fold vector ops
4826  if (VT.isVector()) {
4827    SDValue FoldedVOp = SimplifyVBinOp(N);
4828    if (FoldedVOp.getNode()) return FoldedVOp;
4829  }
4830
4831  // fold (fadd c1, c2) -> c1 + c2
4832  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4833    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N1);
4834  // canonicalize constant to RHS
4835  if (N0CFP && !N1CFP)
4836    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N0);
4837  // fold (fadd A, 0) -> A
4838  if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
4839    return N0;
4840  // fold (fadd A, (fneg B)) -> (fsub A, B)
4841  if (isNegatibleForFree(N1, LegalOperations) == 2)
4842    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0,
4843                       GetNegatedExpression(N1, DAG, LegalOperations));
4844  // fold (fadd (fneg A), B) -> (fsub B, A)
4845  if (isNegatibleForFree(N0, LegalOperations) == 2)
4846    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N1,
4847                       GetNegatedExpression(N0, DAG, LegalOperations));
4848
4849  // If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
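  // FP addition is not associative (e.g. (x + 1e20) + (-1e20) need not equal
  // x + (1e20 + -1e20)), so this reassociation is only done under UnsafeFPMath.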
4850  if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FADD &&
4851      N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
4852    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0.getOperand(0),
4853                       DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
4854                                   N0.getOperand(1), N1));
4855
4856  return SDValue();
4857}
4858
4859SDValue DAGCombiner::visitFSUB(SDNode *N) {
4860  SDValue N0 = N->getOperand(0);
4861  SDValue N1 = N->getOperand(1);
4862  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4863  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4864  EVT VT = N->getValueType(0);
4865
4866  // fold vector ops
4867  if (VT.isVector()) {
4868    SDValue FoldedVOp = SimplifyVBinOp(N);
4869    if (FoldedVOp.getNode()) return FoldedVOp;
4870  }
4871
4872  // fold (fsub c1, c2) -> c1-c2
4873  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4874    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1);
4875  // fold (fsub A, 0) -> A
4876  if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
4877    return N0;
4878  // fold (fsub 0, B) -> -B
4879  if (UnsafeFPMath && N0CFP && N0CFP->getValueAPF().isZero()) {
4880    if (isNegatibleForFree(N1, LegalOperations))
4881      return GetNegatedExpression(N1, DAG, LegalOperations);
4882    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
4883      return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N1);
4884  }
4885  // fold (fsub A, (fneg B)) -> (fadd A, B)
4886  if (isNegatibleForFree(N1, LegalOperations))
4887    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0,
4888                       GetNegatedExpression(N1, DAG, LegalOperations));
4889
4890  return SDValue();
4891}
4892
4893SDValue DAGCombiner::visitFMUL(SDNode *N) {
4894  SDValue N0 = N->getOperand(0);
4895  SDValue N1 = N->getOperand(1);
4896  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4897  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4898  EVT VT = N->getValueType(0);
4899
4900  // fold vector ops
4901  if (VT.isVector()) {
4902    SDValue FoldedVOp = SimplifyVBinOp(N);
4903    if (FoldedVOp.getNode()) return FoldedVOp;
4904  }
4905
4906  // fold (fmul c1, c2) -> c1*c2
4907  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4908    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0, N1);
4909  // canonicalize constant to RHS
4910  if (N0CFP && !N1CFP)
4911    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N1, N0);
4912  // fold (fmul A, 0) -> 0
4913  if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
4914    return N1;
4915  // fold (fmul A, 0) -> 0, vector edition.
4916  if (UnsafeFPMath && ISD::isBuildVectorAllZeros(N1.getNode()))
4917    return N1;
4918  // fold (fmul X, 2.0) -> (fadd X, X)
4919  if (N1CFP && N1CFP->isExactlyValue(+2.0))
4920    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N0);
4921  // fold (fmul X, -1.0) -> (fneg X)
4922  if (N1CFP && N1CFP->isExactlyValue(-1.0))
4923    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
4924      return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N0);
4925
4926  // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
4927  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) {
4928    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) {
4929      // Both can be negated for free; check to see if at least one is cheaper
4930      // negated.
4931      if (LHSNeg == 2 || RHSNeg == 2)
4932        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
4933                           GetNegatedExpression(N0, DAG, LegalOperations),
4934                           GetNegatedExpression(N1, DAG, LegalOperations));
4935    }
4936  }
4937
4938  // If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
4939  if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FMUL &&
4940      N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
4941    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0.getOperand(0),
4942                       DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
4943                                   N0.getOperand(1), N1));
4944
4945  return SDValue();
4946}
4947
4948SDValue DAGCombiner::visitFDIV(SDNode *N) {
4949  SDValue N0 = N->getOperand(0);
4950  SDValue N1 = N->getOperand(1);
4951  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4952  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4953  EVT VT = N->getValueType(0);
4954
4955  // fold vector ops
4956  if (VT.isVector()) {
4957    SDValue FoldedVOp = SimplifyVBinOp(N);
4958    if (FoldedVOp.getNode()) return FoldedVOp;
4959  }
4960
4961  // fold (fdiv c1, c2) -> c1/c2
4962  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4963    return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, N0, N1);
4964
4965
4966  // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
4967  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) {
4968    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) {
4969      // Both can be negated for free; check to see if at least one is cheaper
4970      // negated.
4971      if (LHSNeg == 2 || RHSNeg == 2)
4972        return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT,
4973                           GetNegatedExpression(N0, DAG, LegalOperations),
4974                           GetNegatedExpression(N1, DAG, LegalOperations));
4975    }
4976  }
4977
4978  return SDValue();
4979}
4980
4981SDValue DAGCombiner::visitFREM(SDNode *N) {
4982  SDValue N0 = N->getOperand(0);
4983  SDValue N1 = N->getOperand(1);
4984  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4985  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4986  EVT VT = N->getValueType(0);
4987
4988  // fold (frem c1, c2) -> fmod(c1,c2)
4989  if (N0CFP && N1CFP && VT != MVT::ppcf128)
4990    return DAG.getNode(ISD::FREM, N->getDebugLoc(), VT, N0, N1);
4991
4992  return SDValue();
4993}
4994
4995SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
4996  SDValue N0 = N->getOperand(0);
4997  SDValue N1 = N->getOperand(1);
4998  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
4999  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5000  EVT VT = N->getValueType(0);
5001
5002  if (N0CFP && N1CFP && VT != MVT::ppcf128)  // Constant fold
5003    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, N0, N1);
5004
5005  if (N1CFP) {
5006    const APFloat& V = N1CFP->getValueAPF();
5007    // copysign(x, c1) -> fabs(x)       iff ispos(c1)
5008    // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
5009    if (!V.isNegative()) {
5010      if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
5011        return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
5012    } else {
5013      if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
5014        return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT,
5015                           DAG.getNode(ISD::FABS, N0.getDebugLoc(), VT, N0));
5016    }
5017  }
5018
5019  // copysign(fabs(x), y) -> copysign(x, y)
5020  // copysign(fneg(x), y) -> copysign(x, y)
5021  // copysign(copysign(x,z), y) -> copysign(x, y)
5022  if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
5023      N0.getOpcode() == ISD::FCOPYSIGN)
5024    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
5025                       N0.getOperand(0), N1);
5026
5027  // copysign(x, abs(y)) -> abs(x)
5028  if (N1.getOpcode() == ISD::FABS)
5029    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
5030
5031  // copysign(x, copysign(y,z)) -> copysign(x, z)
5032  if (N1.getOpcode() == ISD::FCOPYSIGN)
5033    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
5034                       N0, N1.getOperand(1));
5035
5036  // copysign(x, fp_extend(y)) -> copysign(x, y)
5037  // copysign(x, fp_round(y)) -> copysign(x, y)
5038  if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND)
5039    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
5040                       N0, N1.getOperand(0));
5041
5042  return SDValue();
5043}
5044
5045SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
5046  SDValue N0 = N->getOperand(0);
5047  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
5048  EVT VT = N->getValueType(0);
5049  EVT OpVT = N0.getValueType();
5050
5051  // fold (sint_to_fp c1) -> c1fp
5052  if (N0C && OpVT != MVT::ppcf128)
5053    return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
5054
5055  // If the input is a legal type, and SINT_TO_FP is not legal on this target,
5056  // but UINT_TO_FP is legal on this target, try to convert.
5057  if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) &&
5058      TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) {
5059    // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
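    // For example, (sint_to_fp (and x, 0x7fffffff)) on i32 has a known-zero sign
    // bit, so it can be lowered with the UINT_TO_FP operation instead.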
5060    if (DAG.SignBitIsZero(N0))
5061      return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
5062  }
5063
5064  return SDValue();
5065}
5066
5067SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
5068  SDValue N0 = N->getOperand(0);
5069  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
5070  EVT VT = N->getValueType(0);
5071  EVT OpVT = N0.getValueType();
5072
5073  // fold (uint_to_fp c1) -> c1fp
5074  if (N0C && OpVT != MVT::ppcf128)
5075    return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
5076
5077  // If the input is a legal type, and UINT_TO_FP is not legal on this target,
5078  // but SINT_TO_FP is legal on this target, try to convert.
5079  if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) &&
5080      TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) {
5081    // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
5082    if (DAG.SignBitIsZero(N0))
5083      return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
5084  }
5085
5086  return SDValue();
5087}
5088
5089SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
5090  SDValue N0 = N->getOperand(0);
5091  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
5092  EVT VT = N->getValueType(0);
5093
5094  // fold (fp_to_sint c1fp) -> c1
5095  if (N0CFP)
5096    return DAG.getNode(ISD::FP_TO_SINT, N->getDebugLoc(), VT, N0);
5097
5098  return SDValue();
5099}
5100
5101SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
5102  SDValue N0 = N->getOperand(0);
5103  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
5104  EVT VT = N->getValueType(0);
5105
5106  // fold (fp_to_uint c1fp) -> c1
5107  if (N0CFP && VT != MVT::ppcf128)
5108    return DAG.getNode(ISD::FP_TO_UINT, N->getDebugLoc(), VT, N0);
5109
5110  return SDValue();
5111}
5112
5113SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
5114  SDValue N0 = N->getOperand(0);
5115  SDValue N1 = N->getOperand(1);
5116  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
5117  EVT VT = N->getValueType(0);
5118
5119  // fold (fp_round c1fp) -> c1fp
5120  if (N0CFP && N0.getValueType() != MVT::ppcf128)
5121    return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0, N1);
5122
5123  // fold (fp_round (fp_extend x)) -> x
5124  if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
5125    return N0.getOperand(0);
5126
5127  // fold (fp_round (fp_round x)) -> (fp_round x)
5128  if (N0.getOpcode() == ISD::FP_ROUND) {
5129    // This is a value-preserving truncation if both rounds are.
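    // The second operand of FP_ROUND is a flag: when it is 1 the round is known
    // not to change the value, so the flag on the merged round is the AND of the
    // two original flags (computed as IsTrunc below).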
5130    bool IsTrunc = N->getConstantOperandVal(1) == 1 &&
5131                   N0.getNode()->getConstantOperandVal(1) == 1;
5132    return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0.getOperand(0),
5133                       DAG.getIntPtrConstant(IsTrunc));
5134  }
5135
5136  // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
5137  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
5138    SDValue Tmp = DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(), VT,
5139                              N0.getOperand(0), N1);
5140    AddToWorkList(Tmp.getNode());
5141    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
5142                       Tmp, N0.getOperand(1));
5143  }
5144
5145  return SDValue();
5146}
5147
5148SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
5149  SDValue N0 = N->getOperand(0);
5150  EVT VT = N->getValueType(0);
5151  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
5152  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
5153
5154  // fold (fp_round_inreg c1fp) -> c1fp
5155  if (N0CFP && isTypeLegal(EVT)) {
5156    SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), EVT);
5157    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, Round);
5158  }
5159
5160  return SDValue();
5161}
5162
5163SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
5164  SDValue N0 = N->getOperand(0);
5165  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
5166  EVT VT = N->getValueType(0);
5167
5168  // If this is fp_round(fp_extend), don't fold it; allow ourselves to be folded.
5169  if (N->hasOneUse() &&
5170      N->use_begin()->getOpcode() == ISD::FP_ROUND)
5171    return SDValue();
5172
5173  // fold (fp_extend c1fp) -> c1fp
5174  if (N0CFP && VT != MVT::ppcf128)
5175    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, N0);
5176
5177  // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the
5178  // value of X.
5179  if (N0.getOpcode() == ISD::FP_ROUND
5180      && N0.getNode()->getConstantOperandVal(1) == 1) {
5181    SDValue In = N0.getOperand(0);
5182    if (In.getValueType() == VT) return In;
5183    if (VT.bitsLT(In.getValueType()))
5184      return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT,
5185                         In, N0.getOperand(1));
5186    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, In);
5187  }
5188
5189  // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
5190  if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() &&
5191      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
5192       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
5193    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5194    SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
5195                                     LN0->getChain(),
5196                                     LN0->getBasePtr(), LN0->getPointerInfo(),
5197                                     N0.getValueType(),
5198                                     LN0->isVolatile(), LN0->isNonTemporal(),
5199                                     LN0->getAlignment());
5200    CombineTo(N, ExtLoad);
5201    CombineTo(N0.getNode(),
5202              DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(),
5203                          N0.getValueType(), ExtLoad, DAG.getIntPtrConstant(1)),
5204              ExtLoad.getValue(1));
5205    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
5206  }
5207
5208  return SDValue();
5209}
5210
5211SDValue DAGCombiner::visitFNEG(SDNode *N) {
5212  SDValue N0 = N->getOperand(0);
5213  EVT VT = N->getValueType(0);
5214
5215  if (isNegatibleForFree(N0, LegalOperations))
5216    return GetNegatedExpression(N0, DAG, LegalOperations);
5217
5218  // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
5219  // constant pool values.
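  // E.g. (illustrative) for an f64 viewed as i64, flipping bit 63 negates it:
  //   (fneg (f64 bitcast i64 X)) -> (f64 bitcast (xor i64 X, 0x8000000000000000))
  // APInt::getSignBit below produces that constant for any bit width.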
5220  if (N0.getOpcode() == ISD::BITCAST &&
5221      !VT.isVector() &&
5222      N0.getNode()->hasOneUse() &&
5223      N0.getOperand(0).getValueType().isInteger()) {
5224    SDValue Int = N0.getOperand(0);
5225    EVT IntVT = Int.getValueType();
5226    if (IntVT.isInteger() && !IntVT.isVector()) {
5227      Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int,
5228              DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
5229      AddToWorkList(Int.getNode());
5230      return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
5231                         VT, Int);
5232    }
5233  }
5234
5235  return SDValue();
5236}
5237
5238SDValue DAGCombiner::visitFABS(SDNode *N) {
5239  SDValue N0 = N->getOperand(0);
5240  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
5241  EVT VT = N->getValueType(0);
5242
5243  // fold (fabs c1) -> fabs(c1)
5244  if (N0CFP && VT != MVT::ppcf128)
5245    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
5246  // fold (fabs (fabs x)) -> (fabs x)
5247  if (N0.getOpcode() == ISD::FABS)
5248    return N->getOperand(0);
5249  // fold (fabs (fneg x)) -> (fabs x)
5250  // fold (fabs (fcopysign x, y)) -> (fabs x)
5251  if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
5252    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0.getOperand(0));
5253
5254  // Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
5255  // constant pool values.
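  // E.g. (illustrative) for an f32 viewed as i32, clearing bit 31 gives |x|:
  //   (fabs (f32 bitcast i32 X)) -> (f32 bitcast (and i32 X, 0x7fffffff))
  // ~APInt::getSignBit below produces that mask for any bit width.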
5256  if (N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
5257      N0.getOperand(0).getValueType().isInteger() &&
5258      !N0.getOperand(0).getValueType().isVector()) {
5259    SDValue Int = N0.getOperand(0);
5260    EVT IntVT = Int.getValueType();
5261    if (IntVT.isInteger() && !IntVT.isVector()) {
5262      Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int,
5263             DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
5264      AddToWorkList(Int.getNode());
5265      return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
5266                         N->getValueType(0), Int);
5267    }
5268  }
5269
5270  return SDValue();
5271}
5272
5273SDValue DAGCombiner::visitBRCOND(SDNode *N) {
5274  SDValue Chain = N->getOperand(0);
5275  SDValue N1 = N->getOperand(1);
5276  SDValue N2 = N->getOperand(2);
5277
5278  // If the condition is a constant we could fold this into a fallthrough or unconditional
5279  // branch. However that doesn't happen very often in normal code, because
5280  // Instcombine/SimplifyCFG should have handled the available opportunities.
5281  // If we did this folding here, it would be necessary to update the
5282  // MachineBasicBlock CFG, which is awkward.
5283
5284  // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
5285  // on the target.
5286  if (N1.getOpcode() == ISD::SETCC &&
5287      TLI.isOperationLegalOrCustom(ISD::BR_CC, MVT::Other)) {
5288    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
5289                       Chain, N1.getOperand(2),
5290                       N1.getOperand(0), N1.getOperand(1), N2);
5291  }
5292
5293  if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
5294      ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
5295       (N1.getOperand(0).hasOneUse() &&
5296        N1.getOperand(0).getOpcode() == ISD::SRL))) {
5297    SDNode *Trunc = 0;
5298    if (N1.getOpcode() == ISD::TRUNCATE) {
5299      // Look past the truncate.
5300      Trunc = N1.getNode();
5301      N1 = N1.getOperand(0);
5302    }
5303
5304    // Match this pattern so that we can generate simpler code:
5305    //
5306    //   %a = ...
5307    //   %b = and i32 %a, 2
5308    //   %c = srl i32 %b, 1
5309    //   brcond i32 %c ...
5310    //
5311    // into
5312    //
5313    //   %a = ...
5314    //   %b = and i32 %a, 2
5315    //   %c = setcc eq %b, 0
5316    //   brcond %c ...
5317    //
5318    // This applies only when the AND constant value has one bit set and the
5319    // SRL constant is equal to the log2 of the AND constant. The back-end is
5320    // smart enough to convert the result into a TEST/JMP sequence.
5321    SDValue Op0 = N1.getOperand(0);
5322    SDValue Op1 = N1.getOperand(1);
5323
5324    if (Op0.getOpcode() == ISD::AND &&
5325        Op1.getOpcode() == ISD::Constant) {
5326      SDValue AndOp1 = Op0.getOperand(1);
5327
5328      if (AndOp1.getOpcode() == ISD::Constant) {
5329        const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();
5330
5331        if (AndConst.isPowerOf2() &&
5332            cast<ConstantSDNode>(Op1)->getAPIntValue()==AndConst.logBase2()) {
5333          SDValue SetCC =
5334            DAG.getSetCC(N->getDebugLoc(),
5335                         TLI.getSetCCResultType(Op0.getValueType()),
5336                         Op0, DAG.getConstant(0, Op0.getValueType()),
5337                         ISD::SETNE);
5338
5339          SDValue NewBRCond = DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
5340                                          MVT::Other, Chain, SetCC, N2);
5341          // Don't add the new BRCond into the worklist or else SimplifySelectCC
5342          // will convert it back to (X & C1) >> C2.
5343          CombineTo(N, NewBRCond, false);
5344          // Truncate is dead.
5345          if (Trunc) {
5346            removeFromWorkList(Trunc);
5347            DAG.DeleteNode(Trunc);
5348          }
5349          // Replace the uses of SRL with SETCC
5350          WorkListRemover DeadNodes(*this);
5351          DAG.ReplaceAllUsesOfValueWith(N1, SetCC, &DeadNodes);
5352          removeFromWorkList(N1.getNode());
5353          DAG.DeleteNode(N1.getNode());
5354          return SDValue(N, 0);   // Return N so it doesn't get rechecked!
5355        }
5356      }
5357    }
5358
5359    if (Trunc)
5360      // Restore N1 if the above transformation doesn't match.
5361      N1 = N->getOperand(1);
5362  }
5363
5364  // Transform br(xor(x, y)) -> br(x != y)
5365  // Transform br(xor(xor(x,y), 1)) -> br (x == y)
5366  if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) {
5367    SDNode *TheXor = N1.getNode();
5368    SDValue Op0 = TheXor->getOperand(0);
5369    SDValue Op1 = TheXor->getOperand(1);
5370    if (Op0.getOpcode() == Op1.getOpcode()) {
5371      // Avoid missing important xor optimizations.
5372      SDValue Tmp = visitXOR(TheXor);
5373      if (Tmp.getNode() && Tmp.getNode() != TheXor) {
5374        DEBUG(dbgs() << "\nReplacing.8 ";
5375              TheXor->dump(&DAG);
5376              dbgs() << "\nWith: ";
5377              Tmp.getNode()->dump(&DAG);
5378              dbgs() << '\n');
5379        WorkListRemover DeadNodes(*this);
5380        DAG.ReplaceAllUsesOfValueWith(N1, Tmp, &DeadNodes);
5381        removeFromWorkList(TheXor);
5382        DAG.DeleteNode(TheXor);
5383        return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
5384                           MVT::Other, Chain, Tmp, N2);
5385      }
5386    }
5387
5388    if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
5389      bool Equal = false;
5390      if (ConstantSDNode *RHSCI = dyn_cast<ConstantSDNode>(Op0))
5391        if (RHSCI->getAPIntValue() == 1 && Op0.hasOneUse() &&
5392            Op0.getOpcode() == ISD::XOR) {
5393          TheXor = Op0.getNode();
5394          Equal = true;
5395        }
5396
5397      EVT SetCCVT = N1.getValueType();
5398      if (LegalTypes)
5399        SetCCVT = TLI.getSetCCResultType(SetCCVT);
5400      SDValue SetCC = DAG.getSetCC(TheXor->getDebugLoc(),
5401                                   SetCCVT,
5402                                   Op0, Op1,
5403                                   Equal ? ISD::SETEQ : ISD::SETNE);
5404      // Replace the uses of XOR with SETCC
5405      WorkListRemover DeadNodes(*this);
5406      DAG.ReplaceAllUsesOfValueWith(N1, SetCC, &DeadNodes);
5407      removeFromWorkList(N1.getNode());
5408      DAG.DeleteNode(N1.getNode());
5409      return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
5410                         MVT::Other, Chain, SetCC, N2);
5411    }
5412  }
5413
5414  return SDValue();
5415}
5416
5417// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
5418//
5419SDValue DAGCombiner::visitBR_CC(SDNode *N) {
5420  CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
5421  SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
5422
5423  // If the condition is a constant we could fold this into a fallthrough or unconditional
5424  // branch. However that doesn't happen very often in normal code, because
5425  // Instcombine/SimplifyCFG should have handled the available opportunities.
5426  // If we did this folding here, it would be necessary to update the
5427  // MachineBasicBlock CFG, which is awkward.
5428
5429  // Use SimplifySetCC to simplify SETCC's.
5430  SDValue Simp = SimplifySetCC(TLI.getSetCCResultType(CondLHS.getValueType()),
5431                               CondLHS, CondRHS, CC->get(), N->getDebugLoc(),
5432                               false);
5433  if (Simp.getNode()) AddToWorkList(Simp.getNode());
5434
5435  // fold to a simpler setcc
5436  if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
5437    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
5438                       N->getOperand(0), Simp.getOperand(2),
5439                       Simp.getOperand(0), Simp.getOperand(1),
5440                       N->getOperand(4));
5441
5442  return SDValue();
5443}
5444
5445/// CombineToPreIndexedLoadStore - Try turning a load / store into a
5446/// pre-indexed load / store when the base pointer is an add or subtract
5447/// and it has other uses besides the load / store. After the
5448/// transformation, the new indexed load / store has effectively folded
5449/// the add / subtract in and all of its other uses are redirected to the
5450/// new load / store.
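/// For example (illustrative pattern), given
///   ptr2 = add ptr, 8
///   val  = load ptr2
/// plus other uses of ptr2, the load becomes a pre-indexed load that produces
/// both val and the updated pointer, and the other uses of ptr2 are redirected
/// to that pointer result.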
5451bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
5452  if (!LegalOperations)
5453    return false;
5454
5455  bool isLoad = true;
5456  SDValue Ptr;
5457  EVT VT;
5458  if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
5459    if (LD->isIndexed())
5460      return false;
5461    VT = LD->getMemoryVT();
5462    if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
5463        !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
5464      return false;
5465    Ptr = LD->getBasePtr();
5466  } else if (StoreSDNode *ST  = dyn_cast<StoreSDNode>(N)) {
5467    if (ST->isIndexed())
5468      return false;
5469    VT = ST->getMemoryVT();
5470    if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
5471        !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
5472      return false;
5473    Ptr = ST->getBasePtr();
5474    isLoad = false;
5475  } else {
5476    return false;
5477  }
5478
5479  // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
5480  // out.  There is no reason to make this a preinc/predec.
5481  if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
5482      Ptr.getNode()->hasOneUse())
5483    return false;
5484
5485  // Ask the target to do addressing mode selection.
5486  SDValue BasePtr;
5487  SDValue Offset;
5488  ISD::MemIndexedMode AM = ISD::UNINDEXED;
5489  if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
5490    return false;
5491  // Don't create an indexed load / store with zero offset.
5492  if (isa<ConstantSDNode>(Offset) &&
5493      cast<ConstantSDNode>(Offset)->isNullValue())
5494    return false;
5495
5496  // Try turning it into a pre-indexed load / store except when:
5497  // 1) The new base ptr is a frame index.
5498  // 2) If N is a store and the new base ptr is either the same as or is a
5499  //    predecessor of the value being stored.
5500  // 3) Another use of old base ptr is a predecessor of N. If ptr is folded
5501  //    that would create a cycle.
5502  // 4) All uses are load / store ops that use it as old base ptr.
5503
5504  // Check #1.  Preinc'ing a frame index would require copying the stack pointer
5505  // (plus the implicit offset) to a register to preinc anyway.
5506  if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
5507    return false;
5508
5509  // Check #2.
5510  if (!isLoad) {
5511    SDValue Val = cast<StoreSDNode>(N)->getValue();
5512    if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode()))
5513      return false;
5514  }
5515
5516  // Now check for #3 and #4.
5517  bool RealUse = false;
5518  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
5519         E = Ptr.getNode()->use_end(); I != E; ++I) {
5520    SDNode *Use = *I;
5521    if (Use == N)
5522      continue;
5523    if (Use->isPredecessorOf(N))
5524      return false;
5525
5526    if (!((Use->getOpcode() == ISD::LOAD &&
5527           cast<LoadSDNode>(Use)->getBasePtr() == Ptr) ||
5528          (Use->getOpcode() == ISD::STORE &&
5529           cast<StoreSDNode>(Use)->getBasePtr() == Ptr)))
5530      RealUse = true;
5531  }
5532
5533  if (!RealUse)
5534    return false;
5535
5536  SDValue Result;
5537  if (isLoad)
5538    Result = DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
5539                                BasePtr, Offset, AM);
5540  else
5541    Result = DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
5542                                 BasePtr, Offset, AM);
5543  ++PreIndexedNodes;
5544  ++NodesCombined;
5545  DEBUG(dbgs() << "\nReplacing.4 ";
5546        N->dump(&DAG);
5547        dbgs() << "\nWith: ";
5548        Result.getNode()->dump(&DAG);
5549        dbgs() << '\n');
5550  WorkListRemover DeadNodes(*this);
5551  if (isLoad) {
5552    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
5553                                  &DeadNodes);
5554    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2),
5555                                  &DeadNodes);
5556  } else {
5557    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1),
5558                                  &DeadNodes);
5559  }
5560
5561  // Finally, since the node is now dead, remove it from the graph.
5562  DAG.DeleteNode(N);
5563
5564  // Replace the uses of Ptr with uses of the updated base value.
5565  DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0),
5566                                &DeadNodes);
5567  removeFromWorkList(Ptr.getNode());
5568  DAG.DeleteNode(Ptr.getNode());
5569
5570  return true;
5571}
5572
5573/// CombineToPostIndexedLoadStore - Try to combine a load / store with a
5574/// add / sub of the base pointer node into a post-indexed load / store.
5575/// The transformation folded the add / subtract into the new indexed
5576/// load / store effectively and all of its uses are redirected to the
5577/// new load / store.
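/// For example (illustrative pattern), given
///   val  = load ptr
///   ptr2 = add ptr, 8
/// the load becomes a post-indexed load that produces both val and the
/// incremented pointer, and uses of ptr2 are redirected to that pointer result.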
5578bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
5579  if (!LegalOperations)
5580    return false;
5581
5582  bool isLoad = true;
5583  SDValue Ptr;
5584  EVT VT;
5585  if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
5586    if (LD->isIndexed())
5587      return false;
5588    VT = LD->getMemoryVT();
5589    if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
5590        !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
5591      return false;
5592    Ptr = LD->getBasePtr();
5593  } else if (StoreSDNode *ST  = dyn_cast<StoreSDNode>(N)) {
5594    if (ST->isIndexed())
5595      return false;
5596    VT = ST->getMemoryVT();
5597    if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
5598        !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
5599      return false;
5600    Ptr = ST->getBasePtr();
5601    isLoad = false;
5602  } else {
5603    return false;
5604  }
5605
5606  if (Ptr.getNode()->hasOneUse())
5607    return false;
5608
5609  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
5610         E = Ptr.getNode()->use_end(); I != E; ++I) {
5611    SDNode *Op = *I;
5612    if (Op == N ||
5613        (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
5614      continue;
5615
5616    SDValue BasePtr;
5617    SDValue Offset;
5618    ISD::MemIndexedMode AM = ISD::UNINDEXED;
5619    if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
5620      // Don't create an indexed load / store with zero offset.
5621      if (isa<ConstantSDNode>(Offset) &&
5622          cast<ConstantSDNode>(Offset)->isNullValue())
5623        continue;
5624
5625      // Try turning it into a post-indexed load / store except when
5626      // 1) All uses are load / store ops that use it as base ptr.
5627      // 2) Op must be independent of N, i.e. Op is neither a predecessor
5628      //    nor a successor of N. Otherwise, if Op is folded that would
5629      //    create a cycle.
5630
5631      if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
5632        continue;
5633
5634      // Check for #1.
5635      bool TryNext = false;
5636      for (SDNode::use_iterator II = BasePtr.getNode()->use_begin(),
5637             EE = BasePtr.getNode()->use_end(); II != EE; ++II) {
5638        SDNode *Use = *II;
5639        if (Use == Ptr.getNode())
5640          continue;
5641
5642        // If all the uses are load / store addresses, then don't do the
5643        // transformation.
5644        if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
5645          bool RealUse = false;
5646          for (SDNode::use_iterator III = Use->use_begin(),
5647                 EEE = Use->use_end(); III != EEE; ++III) {
5648            SDNode *UseUse = *III;
5649            if (!((UseUse->getOpcode() == ISD::LOAD &&
5650                   cast<LoadSDNode>(UseUse)->getBasePtr().getNode() == Use) ||
5651                  (UseUse->getOpcode() == ISD::STORE &&
5652                   cast<StoreSDNode>(UseUse)->getBasePtr().getNode() == Use)))
5653              RealUse = true;
5654          }
5655
5656          if (!RealUse) {
5657            TryNext = true;
5658            break;
5659          }
5660        }
5661      }
5662
5663      if (TryNext)
5664        continue;
5665
5666      // Check for #2
5667      if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
5668        SDValue Result = isLoad
5669          ? DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
5670                               BasePtr, Offset, AM)
5671          : DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
5672                                BasePtr, Offset, AM);
5673        ++PostIndexedNodes;
5674        ++NodesCombined;
5675        DEBUG(dbgs() << "\nReplacing.5 ";
5676              N->dump(&DAG);
5677              dbgs() << "\nWith: ";
5678              Result.getNode()->dump(&DAG);
5679              dbgs() << '\n');
5680        WorkListRemover DeadNodes(*this);
5681        if (isLoad) {
5682          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
5683                                        &DeadNodes);
5684          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2),
5685                                        &DeadNodes);
5686        } else {
5687          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1),
5688                                        &DeadNodes);
5689        }
5690
5691        // Finally, since the node is now dead, remove it from the graph.
5692        DAG.DeleteNode(N);
5693
5694        // Replace the uses of Op with uses of the updated base value.
5695        DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
5696                                      Result.getValue(isLoad ? 1 : 0),
5697                                      &DeadNodes);
5698        removeFromWorkList(Op);
5699        DAG.DeleteNode(Op);
5700        return true;
5701      }
5702    }
5703  }
5704
5705  return false;
5706}
5707
5708SDValue DAGCombiner::visitLOAD(SDNode *N) {
5709  LoadSDNode *LD  = cast<LoadSDNode>(N);
5710  SDValue Chain = LD->getChain();
5711  SDValue Ptr   = LD->getBasePtr();
5712
5713  // If load is not volatile and there are no uses of the loaded value (and
5714  // the updated indexed value in case of indexed loads), change uses of the
5715  // chain value into uses of the chain input (i.e. delete the dead load).
5716  if (!LD->isVolatile()) {
5717    if (N->getValueType(1) == MVT::Other) {
5718      // Unindexed loads.
5719      if (N->hasNUsesOfValue(0, 0)) {
5720        // It's not safe to use the two value CombineTo variant here. e.g.
5721        // v1, chain2 = load chain1, loc
5722        // v2, chain3 = load chain2, loc
5723        // v3         = add v2, c
5724        // Now we replace use of chain2 with chain1.  This makes the second load
5725        // isomorphic to the one we are deleting, and thus makes this load live.
5726        DEBUG(dbgs() << "\nReplacing.6 ";
5727              N->dump(&DAG);
5728              dbgs() << "\nWith chain: ";
5729              Chain.getNode()->dump(&DAG);
5730              dbgs() << "\n");
5731        WorkListRemover DeadNodes(*this);
5732        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain, &DeadNodes);
5733
5734        if (N->use_empty()) {
5735          removeFromWorkList(N);
5736          DAG.DeleteNode(N);
5737        }
5738
5739        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
5740      }
5741    } else {
5742      // Indexed loads.
5743      assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
5744      if (N->hasNUsesOfValue(0, 0) && N->hasNUsesOfValue(0, 1)) {
5745        SDValue Undef = DAG.getUNDEF(N->getValueType(0));
5746        DEBUG(dbgs() << "\nReplacing.7 ";
5747              N->dump(&DAG);
5748              dbgs() << "\nWith: ";
5749              Undef.getNode()->dump(&DAG);
5750              dbgs() << " and 2 other values\n");
5751        WorkListRemover DeadNodes(*this);
5752        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef, &DeadNodes);
5753        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1),
5754                                      DAG.getUNDEF(N->getValueType(1)),
5755                                      &DeadNodes);
5756        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain, &DeadNodes);
5757        removeFromWorkList(N);
5758        DAG.DeleteNode(N);
5759        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
5760      }
5761    }
5762  }
5763
5764  // If this load is directly stored, replace the load value with the stored
5765  // value.
5766  // TODO: Handle store large -> read small portion.
5767  // TODO: Handle TRUNCSTORE/LOADEXT
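  // E.g. (illustrative) in "ch2 = store X, ptr, ch1; Y = load ptr, ch2" the load
  // can simply forward X: same pointer, same type, and no intervening writes on
  // the chain.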
5768  if (LD->getExtensionType() == ISD::NON_EXTLOAD &&
5769      !LD->isVolatile()) {
5770    if (ISD::isNON_TRUNCStore(Chain.getNode())) {
5771      StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
5772      if (PrevST->getBasePtr() == Ptr &&
5773          PrevST->getValue().getValueType() == N->getValueType(0))
5774      return CombineTo(N, Chain.getOperand(1), Chain);
5775    }
5776  }
5777
5778  // Try to infer better alignment information than the load already has.
5779  if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
5780    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
5781      if (Align > LD->getAlignment())
5782        return DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
5783                              N->getDebugLoc(),
5784                              Chain, Ptr, LD->getPointerInfo(),
5785                              LD->getMemoryVT(),
5786                              LD->isVolatile(), LD->isNonTemporal(), Align);
5787    }
5788  }
5789
5790  if (CombinerAA) {
5791    // Walk up chain skipping non-aliasing memory nodes.
5792    SDValue BetterChain = FindBetterChain(N, Chain);
5793
5794    // If there is a better chain.
5795    if (Chain != BetterChain) {
5796      SDValue ReplLoad;
5797
5798      // Replace the chain to avoid the dependency.
5799      if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
5800        ReplLoad = DAG.getLoad(N->getValueType(0), LD->getDebugLoc(),
5801                               BetterChain, Ptr, LD->getPointerInfo(),
5802                               LD->isVolatile(), LD->isNonTemporal(),
5803                               LD->getAlignment());
5804      } else {
5805        ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
5806                                  LD->getDebugLoc(),
5807                                  BetterChain, Ptr, LD->getPointerInfo(),
5808                                  LD->getMemoryVT(),
5809                                  LD->isVolatile(),
5810                                  LD->isNonTemporal(),
5811                                  LD->getAlignment());
5812      }
5813
5814      // Create token factor to keep old chain connected.
5815      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
5816                                  MVT::Other, Chain, ReplLoad.getValue(1));
5817
5818      // Make sure the new and old chains are cleaned up.
5819      AddToWorkList(Token.getNode());
5820
5821      // Replace uses with load result and token factor. Don't add users
5822      // to work list.
5823      return CombineTo(N, ReplLoad.getValue(0), Token, false);
5824    }
5825  }
5826
5827  // Try transforming N to an indexed load.
5828  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
5829    return SDValue(N, 0);
5830
5831  return SDValue();
5832}
5833
5834/// CheckForMaskedLoad - Check to see if V is (and load (ptr), imm), where the
5835/// load has specific bytes cleared out.  If so, return the byte size
5836/// being masked out and the shift amount.
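/// For example (illustrative constants), an i32 load masked with 0xFFFF00FF has
/// byte 1 cleared, so this returns {1, 1}: one byte masked out at byte offset 1.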
5837static std::pair<unsigned, unsigned>
5838CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
5839  std::pair<unsigned, unsigned> Result(0, 0);
5840
5841  // Check for the structure we're looking for.
5842  if (V->getOpcode() != ISD::AND ||
5843      !isa<ConstantSDNode>(V->getOperand(1)) ||
5844      !ISD::isNormalLoad(V->getOperand(0).getNode()))
5845    return Result;
5846
5847  // Check the chain and pointer.
5848  LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
5849  if (LD->getBasePtr() != Ptr) return Result;  // Not from same pointer.
5850
5851  // The store's chain should come directly from the load, or from a
5852  // TokenFactor that has the load as one of its operands.
5853  if (LD == Chain.getNode())
5854    ; // ok.
5855  else if (Chain->getOpcode() != ISD::TokenFactor)
5856    return Result; // Fail.
5857  else {
5858    bool isOk = false;
5859    for (unsigned i = 0, e = Chain->getNumOperands(); i != e; ++i)
5860      if (Chain->getOperand(i).getNode() == LD) {
5861        isOk = true;
5862        break;
5863      }
5864    if (!isOk) return Result;
5865  }
5866
5867  // This only handles simple types.
5868  if (V.getValueType() != MVT::i16 &&
5869      V.getValueType() != MVT::i32 &&
5870      V.getValueType() != MVT::i64)
5871    return Result;
5872
5873  // Check the constant mask.  Invert it so that the bits being masked out are
5874  // 0 and the bits being kept are 1.  Use getSExtValue so that leading bits
5875  // follow the sign bit for uniformity.
5876  uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
5877  unsigned NotMaskLZ = CountLeadingZeros_64(NotMask);
5878  if (NotMaskLZ & 7) return Result;  // Must be multiple of a byte.
5879  unsigned NotMaskTZ = CountTrailingZeros_64(NotMask);
5880  if (NotMaskTZ & 7) return Result;  // Must be multiple of a byte.
5881  if (NotMaskLZ == 64) return Result;  // All zero mask.
5882
5883  // See if we have a contiguous run of bits.  If so, NotMask has the form 0*1+0*.
5884  if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64)
5885    return Result;
5886
5887  // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
5888  if (V.getValueType() != MVT::i64 && NotMaskLZ)
5889    NotMaskLZ -= 64-V.getValueSizeInBits();
5890
5891  unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
5892  switch (MaskedBytes) {
5893  case 1:
5894  case 2:
5895  case 4: break;
5896  default: return Result; // All one mask, or 5-byte mask.
5897  }
5898
5899  // Verify that the masked region starts at a byte offset that is a multiple of
5900  // its width, so that the narrowed access is naturally aligned.
5901  if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
5902
5903  Result.first = MaskedBytes;
5904  Result.second = NotMaskTZ/8;
5905  return Result;
5906}
5907
5908
5909/// ShrinkLoadReplaceStoreWithStore - Check to see if IVal is something that
5910/// provides a value as specified by MaskInfo.  If so, replace the specified
5911/// store with a narrower store of truncated IVal.
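/// Continuing the illustrative example above, with MaskInfo = {1, 1} and IVal
/// known to be zero outside byte 1, the wide store is replaced by an i8 store
/// of (trunc (srl IVal, 8)) at offset 1 (the offset is mirrored on big-endian
/// targets).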
5912static SDNode *
5913ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
5914                                SDValue IVal, StoreSDNode *St,
5915                                DAGCombiner *DC) {
5916  unsigned NumBytes = MaskInfo.first;
5917  unsigned ByteShift = MaskInfo.second;
5918  SelectionDAG &DAG = DC->getDAG();
5919
5920  // Check to see if IVal is all zeros in the part being masked in by the 'or'
5921  // that uses this.  If not, this is not a replacement.
5922  APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
5923                                  ByteShift*8, (ByteShift+NumBytes)*8);
5924  if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0;
5925
5926  // Check that it is legal on the target to do this.  It is legal if the new
5927  // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
5928  // legalization.
5929  MVT VT = MVT::getIntegerVT(NumBytes*8);
5930  if (!DC->isTypeLegal(VT))
5931    return 0;
5932
5933  // Okay, we can do this!  Replace the 'St' store with a store of IVal that is
5934  // shifted by ByteShift and truncated down to NumBytes.
5935  if (ByteShift)
5936    IVal = DAG.getNode(ISD::SRL, IVal->getDebugLoc(), IVal.getValueType(), IVal,
5937                       DAG.getConstant(ByteShift*8, DC->getShiftAmountTy()));
5938
5939  // Figure out the offset for the store and the alignment of the access.
5940  unsigned StOffset;
5941  unsigned NewAlign = St->getAlignment();
5942
5943  if (DAG.getTargetLoweringInfo().isLittleEndian())
5944    StOffset = ByteShift;
5945  else
5946    StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
5947
5948  SDValue Ptr = St->getBasePtr();
5949  if (StOffset) {
5950    Ptr = DAG.getNode(ISD::ADD, IVal->getDebugLoc(), Ptr.getValueType(),
5951                      Ptr, DAG.getConstant(StOffset, Ptr.getValueType()));
5952    NewAlign = MinAlign(NewAlign, StOffset);
5953  }
5954
5955  // Truncate down to the new size.
5956  IVal = DAG.getNode(ISD::TRUNCATE, IVal->getDebugLoc(), VT, IVal);
5957
5958  ++OpsNarrowed;
5959  return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
5960                      St->getPointerInfo().getWithOffset(StOffset),
5961                      false, false, NewAlign).getNode();
5962}
5963
5964
5965/// ReduceLoadOpStoreWidth - Look for sequence of load / op / store where op is
5966/// one of 'or', 'xor', and 'and' of immediates. If 'op' is only touching some
5967/// of the loaded bits, try narrowing the load and store if it would end up
5968/// being a win for performance or code size.
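/// For example (illustrative, assuming the target finds i8 legal and the
/// narrowing profitable): "store (or (load p), 0x00FF0000), p" on i32 only
/// touches byte 2, so it can become an i8 load / or / store of that byte at
/// p+2 (p+1 on big-endian), leaving the other bytes untouched.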
5969SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
5970  StoreSDNode *ST  = cast<StoreSDNode>(N);
5971  if (ST->isVolatile())
5972    return SDValue();
5973
5974  SDValue Chain = ST->getChain();
5975  SDValue Value = ST->getValue();
5976  SDValue Ptr   = ST->getBasePtr();
5977  EVT VT = Value.getValueType();
5978
5979  if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
5980    return SDValue();
5981
5982  unsigned Opc = Value.getOpcode();
5983
5984  // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
5985  // is a byte mask indicating a consecutive number of bytes, check to see if
5986  // Y is known to provide just those bytes.  If so, we try to replace the
5987  // load + replace + store sequence with a single (narrower) store, which makes
5988  // the load dead.
5989  if (Opc == ISD::OR) {
5990    std::pair<unsigned, unsigned> MaskedLoad;
5991    MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
5992    if (MaskedLoad.first)
5993      if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
5994                                                  Value.getOperand(1), ST,this))
5995        return SDValue(NewST, 0);
5996
5997    // Or is commutative, so try swapping X and Y.
5998    MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
5999    if (MaskedLoad.first)
6000      if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
6001                                                  Value.getOperand(0), ST,this))
6002        return SDValue(NewST, 0);
6003  }
6004
6005  if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
6006      Value.getOperand(1).getOpcode() != ISD::Constant)
6007    return SDValue();
6008
6009  SDValue N0 = Value.getOperand(0);
6010  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
6011      Chain == SDValue(N0.getNode(), 1)) {
6012    LoadSDNode *LD = cast<LoadSDNode>(N0);
6013    if (LD->getBasePtr() != Ptr ||
6014        LD->getPointerInfo().getAddrSpace() !=
6015        ST->getPointerInfo().getAddrSpace())
6016      return SDValue();
6017
6018    // Find the type to narrow the load / op / store to.
6019    SDValue N1 = Value.getOperand(1);
6020    unsigned BitWidth = N1.getValueSizeInBits();
6021    APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
6022    if (Opc == ISD::AND)
6023      Imm ^= APInt::getAllOnesValue(BitWidth);
6024    if (Imm == 0 || Imm.isAllOnesValue())
6025      return SDValue();
6026    unsigned ShAmt = Imm.countTrailingZeros();
6027    unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
6028    unsigned NewBW = NextPowerOf2(MSB - ShAmt);
6029    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
6030    while (NewBW < BitWidth &&
6031           !(TLI.isOperationLegalOrCustom(Opc, NewVT) &&
6032             TLI.isNarrowingProfitable(VT, NewVT))) {
6033      NewBW = NextPowerOf2(NewBW);
6034      NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
6035    }
6036    if (NewBW >= BitWidth)
6037      return SDValue();
6038
6039    // If the lowest changed bit does not start at a boundary of the new type's
6040    // width, start at the previous boundary instead.
6041    if (ShAmt % NewBW)
6042      ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
6043    APInt Mask = APInt::getBitsSet(BitWidth, ShAmt, ShAmt + NewBW);
6044    if ((Imm & Mask) == Imm) {
6045      APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
6046      if (Opc == ISD::AND)
6047        NewImm ^= APInt::getAllOnesValue(NewBW);
6048      uint64_t PtrOff = ShAmt / 8;
6049      // For big endian targets, we need to adjust the offset to the pointer to
6050      // load the correct bytes.
6051      if (TLI.isBigEndian())
6052        PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
6053
6054      unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
6055      const Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
6056      if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy))
6057        return SDValue();
6058
6059      SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
6060                                   Ptr.getValueType(), Ptr,
6061                                   DAG.getConstant(PtrOff, Ptr.getValueType()));
6062      SDValue NewLD = DAG.getLoad(NewVT, N0.getDebugLoc(),
6063                                  LD->getChain(), NewPtr,
6064                                  LD->getPointerInfo().getWithOffset(PtrOff),
6065                                  LD->isVolatile(), LD->isNonTemporal(),
6066                                  NewAlign);
6067      SDValue NewVal = DAG.getNode(Opc, Value.getDebugLoc(), NewVT, NewLD,
6068                                   DAG.getConstant(NewImm, NewVT));
6069      SDValue NewST = DAG.getStore(Chain, N->getDebugLoc(),
6070                                   NewVal, NewPtr,
6071                                   ST->getPointerInfo().getWithOffset(PtrOff),
6072                                   false, false, NewAlign);
6073
6074      AddToWorkList(NewPtr.getNode());
6075      AddToWorkList(NewLD.getNode());
6076      AddToWorkList(NewVal.getNode());
6077      WorkListRemover DeadNodes(*this);
6078      DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1),
6079                                    &DeadNodes);
6080      ++OpsNarrowed;
6081      return NewST;
6082    }
6083  }
6084
6085  return SDValue();
6086}
6087
6088SDValue DAGCombiner::visitSTORE(SDNode *N) {
6089  StoreSDNode *ST  = cast<StoreSDNode>(N);
6090  SDValue Chain = ST->getChain();
6091  SDValue Value = ST->getValue();
6092  SDValue Ptr   = ST->getBasePtr();
6093
6094  // If this is a store of a bit convert, store the input value if the
6095  // resultant store does not need a higher alignment than the original.
6096  if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
6097      ST->isUnindexed()) {
6098    unsigned OrigAlign = ST->getAlignment();
6099    EVT SVT = Value.getOperand(0).getValueType();
6100    unsigned Align = TLI.getTargetData()->
6101      getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
6102    if (Align <= OrigAlign &&
6103        ((!LegalOperations && !ST->isVolatile()) ||
6104         TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
6105      return DAG.getStore(Chain, N->getDebugLoc(), Value.getOperand(0),
6106                          Ptr, ST->getPointerInfo(), ST->isVolatile(),
6107                          ST->isNonTemporal(), OrigAlign);
6108  }
6109
6110  // Turn 'store float 1.0, Ptr' -> 'store int 0x3F800000, Ptr' (the bit pattern of 1.0f)
6111  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
6112    // NOTE: If the original store is volatile, this transform must not increase
6113    // the number of stores.  For example, on x86-32 an f64 can be stored in one
6114    // processor operation but an i64 (which is not legal) requires two.  So the
6115    // transform should not be done in this case.
6116    if (Value.getOpcode() != ISD::TargetConstantFP) {
6117      SDValue Tmp;
6118      switch (CFP->getValueType(0).getSimpleVT().SimpleTy) {
6119      default: llvm_unreachable("Unknown FP type");
6120      case MVT::f80:    // We don't do this for these yet.
6121      case MVT::f128:
6122      case MVT::ppcf128:
6123        break;
6124      case MVT::f32:
6125        if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
6126            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
6127          Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
6128                              bitcastToAPInt().getZExtValue(), MVT::i32);
6129          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
6130                              Ptr, ST->getPointerInfo(), ST->isVolatile(),
6131                              ST->isNonTemporal(), ST->getAlignment());
6132        }
6133        break;
6134      case MVT::f64:
6135        if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
6136             !ST->isVolatile()) ||
6137            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
6138          Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
6139                                getZExtValue(), MVT::i64);
6140          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
6141                              Ptr, ST->getPointerInfo(), ST->isVolatile(),
6142                              ST->isNonTemporal(), ST->getAlignment());
6143        } else if (!ST->isVolatile() &&
6144                   TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
6145          // Many FP stores are not made apparent until after legalize, e.g. for
6146          // argument passing.  Since this is so common, custom legalize the
6147          // 64-bit integer store into two 32-bit stores.
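          // E.g. (illustrative) an f64 constant 3.0 (bits 0x4008000000000000)
          // becomes a store of 0x00000000 at Ptr and 0x40080000 at Ptr+4; the
          // two halves are swapped on big-endian targets.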
6148          uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
6149          SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32);
6150          SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
6151          if (TLI.isBigEndian()) std::swap(Lo, Hi);
6152
6153          unsigned Alignment = ST->getAlignment();
6154          bool isVolatile = ST->isVolatile();
6155          bool isNonTemporal = ST->isNonTemporal();
6156
6157          SDValue St0 = DAG.getStore(Chain, ST->getDebugLoc(), Lo,
6158                                     Ptr, ST->getPointerInfo(),
6159                                     isVolatile, isNonTemporal,
6160                                     ST->getAlignment());
6161          Ptr = DAG.getNode(ISD::ADD, N->getDebugLoc(), Ptr.getValueType(), Ptr,
6162                            DAG.getConstant(4, Ptr.getValueType()));
6163          Alignment = MinAlign(Alignment, 4U);
6164          SDValue St1 = DAG.getStore(Chain, ST->getDebugLoc(), Hi,
6165                                     Ptr, ST->getPointerInfo().getWithOffset(4),
6166                                     isVolatile, isNonTemporal,
6167                                     Alignment);
6168          return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
6169                             St0, St1);
6170        }
6171
6172        break;
6173      }
6174    }
6175  }
6176
6177  // Try to infer better alignment information than the store already has.
6178  if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
6179    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
6180      if (Align > ST->getAlignment())
6181        return DAG.getTruncStore(Chain, N->getDebugLoc(), Value,
6182                                 Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
6183                                 ST->isVolatile(), ST->isNonTemporal(), Align);
6184    }
6185  }
6186
6187  if (CombinerAA) {
6188    // Walk up chain skipping non-aliasing memory nodes.
6189    SDValue BetterChain = FindBetterChain(N, Chain);
6190
6191    // If there is a better chain.
6192    if (Chain != BetterChain) {
6193      SDValue ReplStore;
6194
6195      // Replace the chain to avoid dependency.
6196      if (ST->isTruncatingStore()) {
6197        ReplStore = DAG.getTruncStore(BetterChain, N->getDebugLoc(), Value, Ptr,
6198                                      ST->getPointerInfo(),
6199                                      ST->getMemoryVT(), ST->isVolatile(),
6200                                      ST->isNonTemporal(), ST->getAlignment());
6201      } else {
6202        ReplStore = DAG.getStore(BetterChain, N->getDebugLoc(), Value, Ptr,
6203                                 ST->getPointerInfo(),
6204                                 ST->isVolatile(), ST->isNonTemporal(),
6205                                 ST->getAlignment());
6206      }
6207
6208      // Create token to keep both nodes around.
6209      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
6210                                  MVT::Other, Chain, ReplStore);
6211
6212      // Make sure the new and old chains are cleaned up.
6213      AddToWorkList(Token.getNode());
6214
6215      // Don't add users to work list.
6216      return CombineTo(N, Token, false);
6217    }
6218  }
6219
6220  // Try transforming N to an indexed store.
6221  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
6222    return SDValue(N, 0);
6223
6224  // FIXME: is there such a thing as a truncating indexed store?
6225  if (ST->isTruncatingStore() && ST->isUnindexed() &&
6226      Value.getValueType().isInteger()) {
6227    // See if we can simplify the input to this truncstore with knowledge that
6228    // only the low bits are being used.  For example:
6229    // "truncstore (or (shl x, 8), y), i8"  -> "truncstore y, i8"
6230    SDValue Shorter =
6231      GetDemandedBits(Value,
6232                      APInt::getLowBitsSet(Value.getValueSizeInBits(),
6233                                           ST->getMemoryVT().getSizeInBits()));
6234    AddToWorkList(Value.getNode());
6235    if (Shorter.getNode())
6236      return DAG.getTruncStore(Chain, N->getDebugLoc(), Shorter,
6237                               Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
6238                               ST->isVolatile(), ST->isNonTemporal(),
6239                               ST->getAlignment());
6240
6241    // Otherwise, see if we can simplify the operation with
6242    // SimplifyDemandedBits, which only works if the value has a single use.
6243    if (SimplifyDemandedBits(Value,
6244                        APInt::getLowBitsSet(
6245                          Value.getValueType().getScalarType().getSizeInBits(),
6246                          ST->getMemoryVT().getScalarType().getSizeInBits())))
6247      return SDValue(N, 0);
6248  }
6249
6250  // If this is a load followed by a store to the same location, then the store
6251  // is dead/noop.
6252  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
6253    if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
6254        ST->isUnindexed() && !ST->isVolatile() &&
6255        // There can't be any side effects between the load and store, such as
6256        // a call or store.
6257        Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
6258      // The store is dead, remove it.
6259      return Chain;
6260    }
6261  }
6262
6263  // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
6264  // truncating store.  We can do this even if this is already a truncstore.
6265  if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
6266      && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
6267      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
6268                            ST->getMemoryVT())) {
6269    return DAG.getTruncStore(Chain, N->getDebugLoc(), Value.getOperand(0),
6270                             Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
6271                             ST->isVolatile(), ST->isNonTemporal(),
6272                             ST->getAlignment());
6273  }
6274
6275  return ReduceLoadOpStoreWidth(N);
6276}
6277
6278SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
6279  SDValue InVec = N->getOperand(0);
6280  SDValue InVal = N->getOperand(1);
6281  SDValue EltNo = N->getOperand(2);
6282
6283  // If the inserted element is an UNDEF, just use the input vector.
6284  if (InVal.getOpcode() == ISD::UNDEF)
6285    return InVec;
6286
6287  // If the invec is a BUILD_VECTOR and if EltNo is a constant, build a new
6288  // vector with the inserted element.
6289  if (InVec.getOpcode() == ISD::BUILD_VECTOR && isa<ConstantSDNode>(EltNo)) {
6290    unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
6291    SmallVector<SDValue, 8> Ops(InVec.getNode()->op_begin(),
6292                                InVec.getNode()->op_end());
6293    if (Elt < Ops.size())
6294      Ops[Elt] = InVal;
6295    return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
6296                       InVec.getValueType(), &Ops[0], Ops.size());
6297  }
6298  // If the invec is an UNDEF and if EltNo is a constant, create a new
6299  // BUILD_VECTOR with undef elements and the inserted element.
6300  if (!LegalOperations && InVec.getOpcode() == ISD::UNDEF &&
6301      isa<ConstantSDNode>(EltNo)) {
6302    EVT VT = InVec.getValueType();
6303    EVT EltVT = VT.getVectorElementType();
6304    unsigned NElts = VT.getVectorNumElements();
6305    SmallVector<SDValue, 8> Ops(NElts, DAG.getUNDEF(EltVT));
6306
6307    unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
6308    if (Elt < Ops.size())
6309      Ops[Elt] = InVal;
6310    return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
6311                       InVec.getValueType(), &Ops[0], Ops.size());
6312  }
6313  return SDValue();
6314}
6315
6316SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
6317  // (vextract (scalar_to_vector val, 0) -> val
6318  SDValue InVec = N->getOperand(0);
6319
6320  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
6321    // Check if the result type doesn't match the inserted element type. A
6322    // SCALAR_TO_VECTOR may truncate the inserted element and the
6323    // EXTRACT_VECTOR_ELT may widen the extracted vector.
6324    SDValue InOp = InVec.getOperand(0);
6325    EVT NVT = N->getValueType(0);
6326    if (InOp.getValueType() != NVT) {
6327      assert(InOp.getValueType().isInteger() && NVT.isInteger());
6328      return DAG.getSExtOrTrunc(InOp, InVec.getDebugLoc(), NVT);
6329    }
6330    return InOp;
6331  }
6332
6333  // Perform only after legalization to ensure build_vector / vector_shuffle
6334  // optimizations have already been done.
6335  if (!LegalOperations) return SDValue();
6336
6337  // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
6338  // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
6339  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
6340  SDValue EltNo = N->getOperand(1);
6341
6342  if (isa<ConstantSDNode>(EltNo)) {
6343    int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
6344    bool NewLoad = false;
6345    bool BCNumEltsChanged = false;
6346    EVT VT = InVec.getValueType();
6347    EVT ExtVT = VT.getVectorElementType();
6348    EVT LVT = ExtVT;
6349
6350    if (InVec.getOpcode() == ISD::BITCAST) {
6351      EVT BCVT = InVec.getOperand(0).getValueType();
6352      if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
6353        return SDValue();
6354      if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
6355        BCNumEltsChanged = true;
6356      InVec = InVec.getOperand(0);
6357      ExtVT = BCVT.getVectorElementType();
6358      NewLoad = true;
6359    }
6360
6361    LoadSDNode *LN0 = NULL;
6362    const ShuffleVectorSDNode *SVN = NULL;
6363    if (ISD::isNormalLoad(InVec.getNode())) {
6364      LN0 = cast<LoadSDNode>(InVec);
6365    } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
6366               InVec.getOperand(0).getValueType() == ExtVT &&
6367               ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
6368      LN0 = cast<LoadSDNode>(InVec.getOperand(0));
6369    } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
6370      // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
6371      // =>
6372      // (load $addr+1*size)
6373
6374      // If the bit convert changed the number of elements, it is unsafe
6375      // to examine the mask.
6376      if (BCNumEltsChanged)
6377        return SDValue();
6378
6379      // Select the input vector, guarding against out of range extract vector.
6380      unsigned NumElems = VT.getVectorNumElements();
6381      int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
6382      InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
6383
6384      if (InVec.getOpcode() == ISD::BITCAST)
6385        InVec = InVec.getOperand(0);
6386      if (ISD::isNormalLoad(InVec.getNode())) {
6387        LN0 = cast<LoadSDNode>(InVec);
6388        Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems;
6389      }
6390    }
6391
6392    if (!LN0 || !LN0->hasOneUse() || LN0->isVolatile())
6393      return SDValue();
6394
6395    // If Idx was -1 above, Elt is going to be -1, so just return undef.
6396    if (Elt == -1)
6397      return DAG.getUNDEF(LN0->getBasePtr().getValueType());
6398
6399    unsigned Align = LN0->getAlignment();
6400    if (NewLoad) {
6401      // Check the resultant load doesn't need a higher alignment than the
6402      // original load.
6403      unsigned NewAlign =
6404        TLI.getTargetData()
6405            ->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext()));
6406
6407      if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
6408        return SDValue();
6409
6410      Align = NewAlign;
6411    }
6412
6413    SDValue NewPtr = LN0->getBasePtr();
6414    unsigned PtrOff = 0;
6415
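    // E.g. (illustrative) extracting element 2 of a v4f32 load reads an f32 at
    // byte offset 32 * 2 / 8 == 8 from the base pointer; the offset is adjusted
    // below for big-endian targets.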
6416    if (Elt) {
6417      PtrOff = LVT.getSizeInBits() * Elt / 8;
6418      EVT PtrType = NewPtr.getValueType();
6419      if (TLI.isBigEndian())
6420        PtrOff = VT.getSizeInBits() / 8 - PtrOff;
6421      NewPtr = DAG.getNode(ISD::ADD, N->getDebugLoc(), PtrType, NewPtr,
6422                           DAG.getConstant(PtrOff, PtrType));
6423    }
6424
6425    return DAG.getLoad(LVT, N->getDebugLoc(), LN0->getChain(), NewPtr,
6426                       LN0->getPointerInfo().getWithOffset(PtrOff),
6427                       LN0->isVolatile(), LN0->isNonTemporal(), Align);
6428  }
6429
6430  return SDValue();
6431}
6432
6433SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
6434  unsigned NumInScalars = N->getNumOperands();
6435  EVT VT = N->getValueType(0);
6436
6437  // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
6438  // operations.  If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
6439  // at most two distinct vectors, turn this into a shuffle node.
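  // E.g. (illustrative) a v4i32 build_vector of <A[0], B[1], A[2], B[3]> becomes
  // (vector_shuffle A, B, <0, 5, 2, 7>); indices taken from the second vector
  // are offset by the element count in the mask construction below.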
6440  SDValue VecIn1, VecIn2;
6441  for (unsigned i = 0; i != NumInScalars; ++i) {
6442    // Ignore undef inputs.
6443    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
6444
6445    // If this input is something other than a EXTRACT_VECTOR_ELT with a
6446    // constant index, bail out.
6447    if (N->getOperand(i).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6448        !isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) {
6449      VecIn1 = VecIn2 = SDValue(0, 0);
6450      break;
6451    }
6452
6453    // If the input vector type disagrees with the result of the build_vector,
6454    // we can't make a shuffle.
6455    SDValue ExtractedFromVec = N->getOperand(i).getOperand(0);
6456    if (ExtractedFromVec.getValueType() != VT) {
6457      VecIn1 = VecIn2 = SDValue(0, 0);
6458      break;
6459    }
6460
6461    // Otherwise, remember this.  We allow up to two distinct input vectors.
6462    if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
6463      continue;
6464
6465    if (VecIn1.getNode() == 0) {
6466      VecIn1 = ExtractedFromVec;
6467    } else if (VecIn2.getNode() == 0) {
6468      VecIn2 = ExtractedFromVec;
6469    } else {
6470      // Too many inputs.
6471      VecIn1 = VecIn2 = SDValue(0, 0);
6472      break;
6473    }
6474  }
6475
6476  // If everything is good, we can make a shuffle operation.
6477  if (VecIn1.getNode()) {
6478    SmallVector<int, 8> Mask;
6479    for (unsigned i = 0; i != NumInScalars; ++i) {
6480      if (N->getOperand(i).getOpcode() == ISD::UNDEF) {
6481        Mask.push_back(-1);
6482        continue;
6483      }
6484
6485      // If extracting from the first vector, just use the index directly.
6486      SDValue Extract = N->getOperand(i);
6487      SDValue ExtVal = Extract.getOperand(1);
6488      if (Extract.getOperand(0) == VecIn1) {
6489        unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
6490        if (ExtIndex > VT.getVectorNumElements())
6491          return SDValue();
6492
6493        Mask.push_back(ExtIndex);
6494        continue;
6495      }
6496
6497      // Otherwise, use InIdx + VecSize
6498      unsigned Idx = cast<ConstantSDNode>(ExtVal)->getZExtValue();
6499      Mask.push_back(Idx+NumInScalars);
6500    }
6501
6502    // Don't produce a vector_shuffle with an illegal result type.
6503    if (!isTypeLegal(VT))
6504      return SDValue();
6505
6506    // Return the new VECTOR_SHUFFLE node.
6507    SDValue Ops[2];
6508    Ops[0] = VecIn1;
6509    Ops[1] = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6510    return DAG.getVectorShuffle(VT, N->getDebugLoc(), Ops[0], Ops[1], &Mask[0]);
6511  }
6512
6513  return SDValue();
6514}
6515
6516SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
6517  // TODO: Check to see if this is a CONCAT_VECTORS of a bunch of
6518  // EXTRACT_SUBVECTOR operations.  If so, and if the EXTRACT_SUBVECTOR vector
6519  // inputs come from at most two distinct vectors, turn this into a shuffle
6520  // node.
6521
6522  // If we only have one input vector, we don't need to do any concatenation.
6523  if (N->getNumOperands() == 1)
6524    return N->getOperand(0);
6525
6526  return SDValue();
6527}
6528
6529SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
6530  EVT VT = N->getValueType(0);
6531  unsigned NumElts = VT.getVectorNumElements();
6532
6533  SDValue N0 = N->getOperand(0);
6534
6535  assert(N0.getValueType().getVectorNumElements() == NumElts &&
6536        "Vector shuffle must be normalized in DAG");
6537
6538  // FIXME: implement canonicalizations from DAG.getVectorShuffle()
6539
6540  // If it is a splat, check if the argument vector is another splat or a
6541  // build_vector with all scalar elements the same.
6542  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
6543  if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
6544    SDNode *V = N0.getNode();
6545
6546    // If this is a bit convert that changes the element type of the vector but
6547    // not the number of vector elements, look through it.  Be careful not to
6548    // look though conversions that change things like v4f32 to v2f64.
6549    if (V->getOpcode() == ISD::BITCAST) {
6550      SDValue ConvInput = V->getOperand(0);
6551      if (ConvInput.getValueType().isVector() &&
6552          ConvInput.getValueType().getVectorNumElements() == NumElts)
6553        V = ConvInput.getNode();
6554    }
6555
6556    if (V->getOpcode() == ISD::BUILD_VECTOR) {
6557      assert(V->getNumOperands() == NumElts &&
6558             "BUILD_VECTOR has wrong number of operands");
6559      SDValue Base;
6560      bool AllSame = true;
6561      for (unsigned i = 0; i != NumElts; ++i) {
6562        if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
6563          Base = V->getOperand(i);
6564          break;
6565        }
6566      }
6567      // Splat of <u, u, u, u>, return <u, u, u, u>
6568      if (!Base.getNode())
6569        return N0;
6570      for (unsigned i = 0; i != NumElts; ++i) {
6571        if (V->getOperand(i) != Base) {
6572          AllSame = false;
6573          break;
6574        }
6575      }
6576      // Splat of <x, x, x, x>, return <x, x, x, x>
6577      if (AllSame)
6578        return N0;
6579    }
6580  }
6581  return SDValue();
6582}
6583
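/// visitMEMBARRIER - If the target folds fences into its atomic operations
/// (TLI.getShouldFoldAtomicFences()), bypass the MEMBARRIER nodes that
/// bracket a supported atomic operation along the chain, e.g.
///   (membarrier (atomic_load_add (membarrier ch, ...), ptr, val))
///     -> (atomic_load_add ch, ptr, val)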
6584SDValue DAGCombiner::visitMEMBARRIER(SDNode* N) {
6585  if (!TLI.getShouldFoldAtomicFences())
6586    return SDValue();
6587
6588  SDValue atomic = N->getOperand(0);
6589  switch (atomic.getOpcode()) {
6590    case ISD::ATOMIC_CMP_SWAP:
6591    case ISD::ATOMIC_SWAP:
6592    case ISD::ATOMIC_LOAD_ADD:
6593    case ISD::ATOMIC_LOAD_SUB:
6594    case ISD::ATOMIC_LOAD_AND:
6595    case ISD::ATOMIC_LOAD_OR:
6596    case ISD::ATOMIC_LOAD_XOR:
6597    case ISD::ATOMIC_LOAD_NAND:
6598    case ISD::ATOMIC_LOAD_MIN:
6599    case ISD::ATOMIC_LOAD_MAX:
6600    case ISD::ATOMIC_LOAD_UMIN:
6601    case ISD::ATOMIC_LOAD_UMAX:
6602      break;
6603    default:
6604      return SDValue();
6605  }
6606
6607  SDValue fence = atomic.getOperand(0);
6608  if (fence.getOpcode() != ISD::MEMBARRIER)
6609    return SDValue();
6610
6611  switch (atomic.getOpcode()) {
6612    case ISD::ATOMIC_CMP_SWAP:
6613      return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
6614                                    fence.getOperand(0),
6615                                    atomic.getOperand(1), atomic.getOperand(2),
6616                                    atomic.getOperand(3)), atomic.getResNo());
6617    case ISD::ATOMIC_SWAP:
6618    case ISD::ATOMIC_LOAD_ADD:
6619    case ISD::ATOMIC_LOAD_SUB:
6620    case ISD::ATOMIC_LOAD_AND:
6621    case ISD::ATOMIC_LOAD_OR:
6622    case ISD::ATOMIC_LOAD_XOR:
6623    case ISD::ATOMIC_LOAD_NAND:
6624    case ISD::ATOMIC_LOAD_MIN:
6625    case ISD::ATOMIC_LOAD_MAX:
6626    case ISD::ATOMIC_LOAD_UMIN:
6627    case ISD::ATOMIC_LOAD_UMAX:
6628      return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
6629                                    fence.getOperand(0),
6630                                    atomic.getOperand(1), atomic.getOperand(2)),
6631                     atomic.getResNo());
6632    default:
6633      return SDValue();
6634  }
6635}
6636
6637/// XformToShuffleWithZero - Returns a vector_shuffle if it is able to transform
6638/// an AND to a vector_shuffle with the destination vector and a zero vector.
6639/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0> ==>
6640///      vector_shuffle V, Zero, <0, 4, 2, 4>
6641SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
6642  EVT VT = N->getValueType(0);
6643  DebugLoc dl = N->getDebugLoc();
6644  SDValue LHS = N->getOperand(0);
6645  SDValue RHS = N->getOperand(1);
6646  if (N->getOpcode() == ISD::AND) {
6647    if (RHS.getOpcode() == ISD::BITCAST)
6648      RHS = RHS.getOperand(0);
6649    if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
6650      SmallVector<int, 8> Indices;
6651      unsigned NumElts = RHS.getNumOperands();
6652      for (unsigned i = 0; i != NumElts; ++i) {
6653        SDValue Elt = RHS.getOperand(i);
6654        if (!isa<ConstantSDNode>(Elt))
6655          return SDValue();
6656        else if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
6657          Indices.push_back(i);
6658        else if (cast<ConstantSDNode>(Elt)->isNullValue())
6659          Indices.push_back(NumElts);
6660        else
6661          return SDValue();
6662      }
6663
6664      // Let's see if the target supports this vector_shuffle.
6665      EVT RVT = RHS.getValueType();
6666      if (!TLI.isVectorClearMaskLegal(Indices, RVT))
6667        return SDValue();
6668
6669      // Return the new VECTOR_SHUFFLE node.
6670      EVT EltVT = RVT.getVectorElementType();
6671      SmallVector<SDValue,8> ZeroOps(RVT.getVectorNumElements(),
6672                                     DAG.getConstant(0, EltVT));
6673      SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
6674                                 RVT, &ZeroOps[0], ZeroOps.size());
6675      LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
6676      SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
6677      return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
6678    }
6679  }
6680
6681  return SDValue();
6682}
6683
6684/// SimplifyVBinOp - Visit a binary vector operation, like ADD.
6685SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
6686  // After legalize, the target may be depending on adds and other
6687  // binary ops to provide legal ways to construct constants or other
6688  // things. Simplifying them may result in a loss of legality.
6689  if (LegalOperations) return SDValue();
6690
6691  assert(N->getValueType(0).isVector() &&
6692         "SimplifyVBinOp only works on vectors!");
6693
6694  SDValue LHS = N->getOperand(0);
6695  SDValue RHS = N->getOperand(1);
6696  SDValue Shuffle = XformToShuffleWithZero(N);
6697  if (Shuffle.getNode()) return Shuffle;
6698
6699  // If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold
6700  // this operation.
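  // For example:
  //   (add (build_vector 1, 2, 3, 4), (build_vector 10, 20, 30, 40))
  //     -> (build_vector 11, 22, 33, 44)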
6701  if (LHS.getOpcode() == ISD::BUILD_VECTOR &&
6702      RHS.getOpcode() == ISD::BUILD_VECTOR) {
6703    SmallVector<SDValue, 8> Ops;
6704    for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
6705      SDValue LHSOp = LHS.getOperand(i);
6706      SDValue RHSOp = RHS.getOperand(i);
6707      // If these two elements can't be folded, bail out.
6708      if ((LHSOp.getOpcode() != ISD::UNDEF &&
6709           LHSOp.getOpcode() != ISD::Constant &&
6710           LHSOp.getOpcode() != ISD::ConstantFP) ||
6711          (RHSOp.getOpcode() != ISD::UNDEF &&
6712           RHSOp.getOpcode() != ISD::Constant &&
6713           RHSOp.getOpcode() != ISD::ConstantFP))
6714        break;
6715
6716      // Can't fold divide by zero.
6717      if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV ||
6718          N->getOpcode() == ISD::FDIV) {
6719        if ((RHSOp.getOpcode() == ISD::Constant &&
6720             cast<ConstantSDNode>(RHSOp.getNode())->isNullValue()) ||
6721            (RHSOp.getOpcode() == ISD::ConstantFP &&
6722             cast<ConstantFPSDNode>(RHSOp.getNode())->getValueAPF().isZero()))
6723          break;
6724      }
6725
6726      EVT VT = LHSOp.getValueType();
6727      assert(RHSOp.getValueType() == VT &&
6728             "SimplifyVBinOp with different BUILD_VECTOR element types");
6729      SDValue FoldOp = DAG.getNode(N->getOpcode(), LHS.getDebugLoc(), VT,
6730                                   LHSOp, RHSOp);
6731      if (FoldOp.getOpcode() != ISD::UNDEF &&
6732          FoldOp.getOpcode() != ISD::Constant &&
6733          FoldOp.getOpcode() != ISD::ConstantFP)
6734        break;
6735      Ops.push_back(FoldOp);
6736      AddToWorkList(FoldOp.getNode());
6737    }
6738
6739    if (Ops.size() == LHS.getNumOperands())
6740      return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
6741                         LHS.getValueType(), &Ops[0], Ops.size());
6742  }
6743
6744  return SDValue();
6745}
6746
6747SDValue DAGCombiner::SimplifySelect(DebugLoc DL, SDValue N0,
6748                                    SDValue N1, SDValue N2){
6749  assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");
6750
6751  SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
6752                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());
6753
6754  // If we got a simplified select_cc node back from SimplifySelectCC, then
6755  // break it down into a new SETCC node, and a new SELECT node, and then return
6756  // the SELECT node, since we were called with a SELECT node.
6757  if (SCC.getNode()) {
6758    // Check to see if we got a select_cc back (to turn into setcc/select).
6759    // Otherwise, just return whatever node we got back, like fabs.
6760    if (SCC.getOpcode() == ISD::SELECT_CC) {
6761      SDValue SETCC = DAG.getNode(ISD::SETCC, N0.getDebugLoc(),
6762                                  N0.getValueType(),
6763                                  SCC.getOperand(0), SCC.getOperand(1),
6764                                  SCC.getOperand(4));
6765      AddToWorkList(SETCC.getNode());
6766      return DAG.getNode(ISD::SELECT, SCC.getDebugLoc(), SCC.getValueType(),
6767                         SETCC, SCC.getOperand(2), SCC.getOperand(3));
6768    }
6769
6770    return SCC;
6771  }
6772  return SDValue();
6773}
6774
6775/// SimplifySelectOps - Given a SELECT or a SELECT_CC node, where LHS and RHS
6776/// are the two values being selected between, see if we can simplify the
6777/// select.  Callers of this should assume that TheSelect is deleted if this
6778/// returns true.  As such, they should return the appropriate thing (e.g. the
6779/// node) back to the top-level of the DAG combiner loop to avoid it being
6780/// looked at.
6781bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
6782                                    SDValue RHS) {
6783
6784  // If this is a select from two identical things, try to pull the operation
6785  // through the select.
6786  if (LHS.getOpcode() != RHS.getOpcode() ||
6787      !LHS.hasOneUse() || !RHS.hasOneUse())
6788    return false;
6789
6790  // If this is a load and the token chain is identical, replace the select
6791  // of two loads with a load through a select of the address to load from.
6792  // This triggers in things like "select bool X, 10.0, 123.0" after the FP
6793  // constants have been dropped into the constant pool.
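  // For example:
  //   (select Cond, (load Addr1), (load Addr2))
  //     -> (load (select Cond, Addr1, Addr2))
  // provided both loads share the same chain and neither is volatile.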
6794  if (LHS.getOpcode() == ISD::LOAD) {
6795    LoadSDNode *LLD = cast<LoadSDNode>(LHS);
6796    LoadSDNode *RLD = cast<LoadSDNode>(RHS);
6797
6798    // Token chains must be identical.
6799    if (LHS.getOperand(0) != RHS.getOperand(0) ||
6800        // Do not let this transformation reduce the number of volatile loads.
6801        LLD->isVolatile() || RLD->isVolatile() ||
6802        // If this is an EXTLOAD, the VT's must match.
6803        LLD->getMemoryVT() != RLD->getMemoryVT() ||
6804        // If this is an EXTLOAD, the kind of extension must match.
6805        (LLD->getExtensionType() != RLD->getExtensionType() &&
6806         // The only exception is if one of the extensions is anyext.
6807         LLD->getExtensionType() != ISD::EXTLOAD &&
6808         RLD->getExtensionType() != ISD::EXTLOAD) ||
6809        // FIXME: this discards src value information.  This is
6810        // over-conservative. It would be beneficial to be able to remember
6811        // both potential memory locations.  Since we are discarding
6812        // src value info, don't do the transformation if the memory
6813        // locations are not in the default address space.
6814        LLD->getPointerInfo().getAddrSpace() != 0 ||
6815        RLD->getPointerInfo().getAddrSpace() != 0)
6816      return false;
6817
6818    // Check that the select condition doesn't reach either load.  If so,
6819    // folding this will induce a cycle into the DAG.  If not, this is safe to
6820    // xform, so create a select of the addresses.
6821    SDValue Addr;
6822    if (TheSelect->getOpcode() == ISD::SELECT) {
6823      SDNode *CondNode = TheSelect->getOperand(0).getNode();
6824      if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
6825          (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
6826        return false;
6827      Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
6828                         LLD->getBasePtr().getValueType(),
6829                         TheSelect->getOperand(0), LLD->getBasePtr(),
6830                         RLD->getBasePtr());
6831    } else {  // Otherwise SELECT_CC
6832      SDNode *CondLHS = TheSelect->getOperand(0).getNode();
6833      SDNode *CondRHS = TheSelect->getOperand(1).getNode();
6834
6835      if ((LLD->hasAnyUseOfValue(1) &&
6836           (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
6837          (RLD->hasAnyUseOfValue(1) &&
6838           (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
6839        return false;
6840
6841      Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
6842                         LLD->getBasePtr().getValueType(),
6843                         TheSelect->getOperand(0),
6844                         TheSelect->getOperand(1),
6845                         LLD->getBasePtr(), RLD->getBasePtr(),
6846                         TheSelect->getOperand(4));
6847    }
6848
6849    SDValue Load;
6850    if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
6851      Load = DAG.getLoad(TheSelect->getValueType(0),
6852                         TheSelect->getDebugLoc(),
6853                         // FIXME: Discards pointer info.
6854                         LLD->getChain(), Addr, MachinePointerInfo(),
6855                         LLD->isVolatile(), LLD->isNonTemporal(),
6856                         LLD->getAlignment());
6857    } else {
6858      Load = DAG.getExtLoad(LLD->getExtensionType() == ISD::EXTLOAD ?
6859                            RLD->getExtensionType() : LLD->getExtensionType(),
6860                            TheSelect->getValueType(0),
6861                            TheSelect->getDebugLoc(),
6862                            // FIXME: Discards pointer info.
6863                            LLD->getChain(), Addr, MachinePointerInfo(),
6864                            LLD->getMemoryVT(), LLD->isVolatile(),
6865                            LLD->isNonTemporal(), LLD->getAlignment());
6866    }
6867
6868    // Users of the select now use the result of the load.
6869    CombineTo(TheSelect, Load);
6870
6871    // Users of the old loads now use the new load's chain.  We know the
6872    // old-load value is dead now.
6873    CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
6874    CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
6875    return true;
6876  }
6877
6878  return false;
6879}
6880
6881/// SimplifySelectCC - Simplify an expression of the form (N0 cond N1) ? N2 : N3
6882/// where 'cond' is the comparison specified by CC.
6883SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
6884                                      SDValue N2, SDValue N3,
6885                                      ISD::CondCode CC, bool NotExtCompare) {
6886  // (x ? y : y) -> y.
6887  if (N2 == N3) return N2;
6888
6889  EVT VT = N2.getValueType();
6890  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
6891  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
6892  ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.getNode());
6893
6894  // Determine if the condition we're dealing with is constant
6895  SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()),
6896                              N0, N1, CC, DL, false);
6897  if (SCC.getNode()) AddToWorkList(SCC.getNode());
6898  ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode());
6899
6900  // fold select_cc true, x, y -> x
6901  if (SCCC && !SCCC->isNullValue())
6902    return N2;
6903  // fold select_cc false, x, y -> y
6904  if (SCCC && SCCC->isNullValue())
6905    return N3;
6906
6907  // Check to see if we can simplify the select into an fabs node
6908  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
6909    // Allow either -0.0 or 0.0
6910    if (CFP->getValueAPF().isZero()) {
6911      // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
6912      if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
6913          N0 == N2 && N3.getOpcode() == ISD::FNEG &&
6914          N2 == N3.getOperand(0))
6915        return DAG.getNode(ISD::FABS, DL, VT, N0);
6916
6917      // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
6918      if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
6919          N0 == N3 && N2.getOpcode() == ISD::FNEG &&
6920          N2.getOperand(0) == N3)
6921        return DAG.getNode(ISD::FABS, DL, VT, N3);
6922    }
6923  }
6924
6925  // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4))"
6926  // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
6927  // in it.  This is a win when the constant is not otherwise available because
6928  // it replaces two constant pool loads with one.  We only do this if the FP
6929  // type is known to be legal, because if it isn't, then we are before legalize
6930  // types and we want the other legalization to happen first (e.g. to avoid
6931  // messing with soft float) and if the ConstantFP is not legal, because if
6932  // it is legal, we may not need to store the FP constant in a constant pool.
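  // For example, "(a < b) ? 1.0f : 2.0f" becomes a load from a two-element
  // constant pool array {2.0f, 1.0f} at offset ((a < b) ? 4 : 0).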
6933  if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2))
6934    if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
6935      if (TLI.isTypeLegal(N2.getValueType()) &&
6936          (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
6937           TargetLowering::Legal) &&
6938          // If both constants have multiple uses, then we won't need to do an
6939          // extra load, they are likely around in registers for other users.
6940          (TV->hasOneUse() || FV->hasOneUse())) {
6941        Constant *Elts[] = {
6942          const_cast<ConstantFP*>(FV->getConstantFPValue()),
6943          const_cast<ConstantFP*>(TV->getConstantFPValue())
6944        };
6945        const Type *FPTy = Elts[0]->getType();
6946        const TargetData &TD = *TLI.getTargetData();
6947
6948        // Create a ConstantArray of the two constants.
6949        Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts, 2);
6950        SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
6951                                            TD.getPrefTypeAlignment(FPTy));
6952        unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
6953
6954        // Get the offsets to the 0 and 1 element of the array so that we can
6955        // select between them.
6956        SDValue Zero = DAG.getIntPtrConstant(0);
6957        unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
6958        SDValue One = DAG.getIntPtrConstant(EltSize);
6959
6960        SDValue Cond = DAG.getSetCC(DL,
6961                                    TLI.getSetCCResultType(N0.getValueType()),
6962                                    N0, N1, CC);
6963        SDValue CstOffset = DAG.getNode(ISD::SELECT, DL, Zero.getValueType(),
6964                                        Cond, One, Zero);
6965        CPIdx = DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), CPIdx,
6966                            CstOffset);
6967        return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
6968                           MachinePointerInfo::getConstantPool(), false,
6969                           false, Alignment);
6970
6971      }
6972    }
6973
6974  // Check to see if we can perform the "gzip trick", transforming
6975  // (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1)), A)
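  // For example, for i32, "X < 0 ? A : 0" becomes:
  //   Mask = sra X, 31     ; all ones if X is negative, zero otherwise
  //   result = and Mask, A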
6976  if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
6977      N0.getValueType().isInteger() &&
6978      N2.getValueType().isInteger() &&
6979      (N1C->isNullValue() ||                         // (a < 0) ? b : 0
6980       (N1C->getAPIntValue() == 1 && N0 == N2))) {   // (a < 1) ? a : 0
6981    EVT XType = N0.getValueType();
6982    EVT AType = N2.getValueType();
6983    if (XType.bitsGE(AType)) {
6984      // and (sra X, size(X)-1, A) -> "and (srl X, C2), A" iff A is a
6985      // single-bit constant.
6986      if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) {
6987        unsigned ShCtV = N2C->getAPIntValue().logBase2();
6988        ShCtV = XType.getSizeInBits()-ShCtV-1;
6989        SDValue ShCt = DAG.getConstant(ShCtV, getShiftAmountTy());
6990        SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(),
6991                                    XType, N0, ShCt);
6992        AddToWorkList(Shift.getNode());
6993
6994        if (XType.bitsGT(AType)) {
6995          Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
6996          AddToWorkList(Shift.getNode());
6997        }
6998
6999        return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
7000      }
7001
7002      SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(),
7003                                  XType, N0,
7004                                  DAG.getConstant(XType.getSizeInBits()-1,
7005                                                  getShiftAmountTy()));
7006      AddToWorkList(Shift.getNode());
7007
7008      if (XType.bitsGT(AType)) {
7009        Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
7010        AddToWorkList(Shift.getNode());
7011      }
7012
7013      return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
7014    }
7015  }
7016
7017  // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
7018  // where y has a single bit set.
7019  // In plain terms, we can turn the SELECT_CC into an AND
7020  // when the condition can be materialized as an all-ones register.  Any
7021  // single bit-test can be materialized as an all-ones register with
7022  // shift-left and shift-right-arith.
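  // For example, for i32 with y == 0x4 (bit 2):
  //   Shl = shl x, 29      ; move bit 2 into the sign bit
  //   Shr = sra Shl, 31    ; all ones if bit 2 was set, zero otherwise
  //   result = and Shr, A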
7023  if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
7024      N0->getValueType(0) == VT &&
7025      N1C && N1C->isNullValue() &&
7026      N2C && N2C->isNullValue()) {
7027    SDValue AndLHS = N0->getOperand(0);
7028    ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7029    if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
7030      // Shift the tested bit over the sign bit.
7031      APInt AndMask = ConstAndRHS->getAPIntValue();
7032      SDValue ShlAmt =
7033        DAG.getConstant(AndMask.countLeadingZeros(), getShiftAmountTy());
7034      SDValue Shl = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, AndLHS, ShlAmt);
7035
7036      // Now arithmetic right shift it all the way over, so the result is either
7037      // all-ones, or zero.
7038      SDValue ShrAmt =
7039        DAG.getConstant(AndMask.getBitWidth()-1, getShiftAmountTy());
7040      SDValue Shr = DAG.getNode(ISD::SRA, N0.getDebugLoc(), VT, Shl, ShrAmt);
7041
7042      return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
7043    }
7044  }
7045
7046  // fold select C, 16, 0 -> shl C, 4
7047  if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
7048      TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent) {
7049
7050    // If the caller doesn't want us to simplify this into a zext of a compare,
7051    // don't do it.
7052    if (NotExtCompare && N2C->getAPIntValue() == 1)
7053      return SDValue();
7054
7055    // Get a SetCC of the condition
7056    // FIXME: Should probably make sure that setcc is legal if we ever have a
7057    // target where it isn't.
7058    SDValue Temp, SCC;
7059    // cast from setcc result type to select result type
7060    if (LegalTypes) {
7061      SCC  = DAG.getSetCC(DL, TLI.getSetCCResultType(N0.getValueType()),
7062                          N0, N1, CC);
7063      if (N2.getValueType().bitsLT(SCC.getValueType()))
7064        Temp = DAG.getZeroExtendInReg(SCC, N2.getDebugLoc(), N2.getValueType());
7065      else
7066        Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
7067                           N2.getValueType(), SCC);
7068    } else {
7069      SCC  = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC);
7070      Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
7071                         N2.getValueType(), SCC);
7072    }
7073
7074    AddToWorkList(SCC.getNode());
7075    AddToWorkList(Temp.getNode());
7076
7077    if (N2C->getAPIntValue() == 1)
7078      return Temp;
7079
7080    // shl setcc result by log2 n2c
7081    return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
7082                       DAG.getConstant(N2C->getAPIntValue().logBase2(),
7083                                       getShiftAmountTy()));
7084  }
7085
7086  // Check to see if this is the equivalent of setcc
7087  // FIXME: Turn all of these into setcc if setcc is legal;
7088  // otherwise, go ahead with the folds.
7089  if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) {
7090    EVT XType = N0.getValueType();
7091    if (!LegalOperations ||
7092        TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(XType))) {
7093      SDValue Res = DAG.getSetCC(DL, TLI.getSetCCResultType(XType), N0, N1, CC);
7094      if (Res.getValueType() != VT)
7095        Res = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Res);
7096      return Res;
7097    }
7098
7099    // fold (seteq X, 0) -> (srl (ctlz X, log2(size(X))))
7100    if (N1C && N1C->isNullValue() && CC == ISD::SETEQ &&
7101        (!LegalOperations ||
7102         TLI.isOperationLegal(ISD::CTLZ, XType))) {
7103      SDValue Ctlz = DAG.getNode(ISD::CTLZ, N0.getDebugLoc(), XType, N0);
7104      return DAG.getNode(ISD::SRL, DL, XType, Ctlz,
7105                         DAG.getConstant(Log2_32(XType.getSizeInBits()),
7106                                         getShiftAmountTy()));
7107    }
7108    // fold (setgt X, 0) -> (srl (and (-X, ~X), size(X)-1))
7109    if (N1C && N1C->isNullValue() && CC == ISD::SETGT) {
7110      SDValue NegN0 = DAG.getNode(ISD::SUB, N0.getDebugLoc(),
7111                                  XType, DAG.getConstant(0, XType), N0);
7112      SDValue NotN0 = DAG.getNOT(N0.getDebugLoc(), N0, XType);
7113      return DAG.getNode(ISD::SRL, DL, XType,
7114                         DAG.getNode(ISD::AND, DL, XType, NegN0, NotN0),
7115                         DAG.getConstant(XType.getSizeInBits()-1,
7116                                         getShiftAmountTy()));
7117    }
7118    // fold (setgt X, -1) -> (xor (srl (X, size(X)-1), 1))
7119    if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
7120      SDValue Sign = DAG.getNode(ISD::SRL, N0.getDebugLoc(), XType, N0,
7121                                 DAG.getConstant(XType.getSizeInBits()-1,
7122                                                 getShiftAmountTy()));
7123      return DAG.getNode(ISD::XOR, DL, XType, Sign, DAG.getConstant(1, XType));
7124    }
7125  }
7126
7127  // Check to see if this is an integer abs.
7128  // select_cc setg[te] X,  0,  X, -X ->
7129  // select_cc setgt    X, -1,  X, -X ->
7130  // select_cc setl[te] X,  0, -X,  X ->
7131  // select_cc setlt    X,  1, -X,  X ->
7132  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
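  // For example, for i32 and X = -5:
  //   Y = sra X, 31 = -1;  (X + Y) ^ Y = (-6) ^ -1 = 5 = abs(X).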
7133  if (N1C) {
7134    ConstantSDNode *SubC = NULL;
7135    if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
7136         (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
7137        N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
7138      SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
7139    else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
7140              (N1C->isOne() && CC == ISD::SETLT)) &&
7141             N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
7142      SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));
7143
7144    EVT XType = N0.getValueType();
7145    if (SubC && SubC->isNullValue() && XType.isInteger()) {
7146      SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
7147                                  N0,
7148                                  DAG.getConstant(XType.getSizeInBits()-1,
7149                                                  getShiftAmountTy()));
7150      SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
7151                                XType, N0, Shift);
7152      AddToWorkList(Shift.getNode());
7153      AddToWorkList(Add.getNode());
7154      return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
7155    }
7156  }
7157
7158  return SDValue();
7159}
7160
7161/// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC.
7162SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0,
7163                                   SDValue N1, ISD::CondCode Cond,
7164                                   DebugLoc DL, bool foldBooleans) {
7165  TargetLowering::DAGCombinerInfo
7166    DagCombineInfo(DAG, !LegalTypes, !LegalOperations, false, this);
7167  return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
7168}
7169
7170/// BuildSDIV - Given an ISD::SDIV node expressing a divide by a constant,
7171/// return a DAG expression to select that will generate the same value by
7172/// multiplying by a magic number.  See:
7173/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
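/// For example, a signed divide by a constant such as 3 becomes a
/// multiply-high by a magic constant plus a small fix-up sequence, all
/// emitted by TLI.BuildSDIV.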
7174SDValue DAGCombiner::BuildSDIV(SDNode *N) {
7175  std::vector<SDNode*> Built;
7176  SDValue S = TLI.BuildSDIV(N, DAG, &Built);
7177
7178  for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
7179       ii != ee; ++ii)
7180    AddToWorkList(*ii);
7181  return S;
7182}
7183
7184/// BuildUDIV - Given an ISD::UDIV node expressing a divide by a constant,
7185/// return a DAG expression to select that will generate the same value by
7186/// multiplying by a magic number.  See:
7187/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
7188SDValue DAGCombiner::BuildUDIV(SDNode *N) {
7189  std::vector<SDNode*> Built;
7190  SDValue S = TLI.BuildUDIV(N, DAG, &Built);
7191
7192  for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
7193       ii != ee; ++ii)
7194    AddToWorkList(*ii);
7195  return S;
7196}
7197
7198/// FindBaseOffset - Return true if base is a frame index, which is known not
7199/// to alias with anything but itself.  Provides base object and offset as
7200/// results.
7201static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
7202                           const GlobalValue *&GV, void *&CV) {
7203  // Assume it is a primitive operation.
7204  Base = Ptr; Offset = 0; GV = 0; CV = 0;
7205
7206  // If it's adding a simple constant, then integrate the offset.
7207  if (Base.getOpcode() == ISD::ADD) {
7208    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
7209      Base = Base.getOperand(0);
7210      Offset += C->getZExtValue();
7211    }
7212  }
7213
7214  // Return the underlying GlobalValue, and update the Offset.  Return false
7215  // for GlobalAddressSDNode since the same GlobalAddress may be represented
7216  // by multiple nodes with different offsets.
7217  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) {
7218    GV = G->getGlobal();
7219    Offset += G->getOffset();
7220    return false;
7221  }
7222
7223  // Return the underlying Constant value, and update the Offset.  Return false
7224  // for ConstantSDNodes since the same constant pool entry may be represented
7225  // by multiple nodes with different offsets.
7226  if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
7227    CV = C->isMachineConstantPoolEntry() ? (void *)C->getMachineCPVal()
7228                                         : (void *)C->getConstVal();
7229    Offset += C->getOffset();
7230    return false;
7231  }
7232  // Only a frame index is known not to alias with anything but itself.
7233  return isa<FrameIndexSDNode>(Base);
7234}
7235
7236/// isAlias - Return true if there is any possibility that the two addresses
7237/// overlap.
7238bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
7239                          const Value *SrcValue1, int SrcValueOffset1,
7240                          unsigned SrcValueAlign1,
7241                          const MDNode *TBAAInfo1,
7242                          SDValue Ptr2, int64_t Size2,
7243                          const Value *SrcValue2, int SrcValueOffset2,
7244                          unsigned SrcValueAlign2,
7245                          const MDNode *TBAAInfo2) const {
7246  // If they are the same then they must be aliases.
7247  if (Ptr1 == Ptr2) return true;
7248
7249  // Gather base node and offset information.
7250  SDValue Base1, Base2;
7251  int64_t Offset1, Offset2;
7252  const GlobalValue *GV1, *GV2;
7253  void *CV1, *CV2;
7254  bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1);
7255  bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2);
7256
7257  // If they have the same base address, then check to see if they overlap.
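  // Two accesses [Offset, Offset+Size) overlap unless one ends at or before
  // the point where the other begins.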
7258  if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
7259    return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
7260
7261  // It is possible for different frame indices to alias each other, mostly
7262  // when tail call optimization reuses return address slots for arguments.
7263  // To catch this case, look up the actual index of frame indices to compute
7264  // the real alias relationship.
7265  if (isFrameIndex1 && isFrameIndex2) {
7266    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
7267    Offset1 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
7268    Offset2 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
7269    return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
7270  }
7271
7272  // Otherwise, if we know what the bases are, and they aren't identical, then
7273  // we know they cannot alias.
7274  if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
7275    return false;
7276
7277  // If we know that SrcValue1 and SrcValue2 have relatively large alignment
7278  // compared to the size and offset of the access, we may be able to prove they
7279  // do not alias.  This check is conservative for now to catch cases created by
7280  // splitting vector types.
7281  if ((SrcValueAlign1 == SrcValueAlign2) &&
7282      (SrcValueOffset1 != SrcValueOffset2) &&
7283      (Size1 == Size2) && (SrcValueAlign1 > Size1)) {
7284    int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1;
7285    int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1;
7286
7287    // There is no overlap between these relatively aligned accesses of similar
7288    // size, return no alias.
7289    if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1)
7290      return false;
7291  }
7292
7293  if (CombinerGlobalAA) {
7294    // Use alias analysis information.
7295    int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2);
7296    int64_t Overlap1 = Size1 + SrcValueOffset1 - MinOffset;
7297    int64_t Overlap2 = Size2 + SrcValueOffset2 - MinOffset;
7298    AliasAnalysis::AliasResult AAResult =
7299      AA.alias(AliasAnalysis::Location(SrcValue1, Overlap1, TBAAInfo1),
7300               AliasAnalysis::Location(SrcValue2, Overlap2, TBAAInfo2));
7301    if (AAResult == AliasAnalysis::NoAlias)
7302      return false;
7303  }
7304
7305  // Otherwise we have to assume they alias.
7306  return true;
7307}
7308
7309/// FindAliasInfo - Extracts the relevant alias information from the memory
7310/// node.  Returns true if the operand was a load.
7311bool DAGCombiner::FindAliasInfo(SDNode *N,
7312                        SDValue &Ptr, int64_t &Size,
7313                        const Value *&SrcValue,
7314                        int &SrcValueOffset,
7315                        unsigned &SrcValueAlign,
7316                        const MDNode *&TBAAInfo) const {
7317  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
7318    Ptr = LD->getBasePtr();
7319    Size = LD->getMemoryVT().getSizeInBits() >> 3;
7320    SrcValue = LD->getSrcValue();
7321    SrcValueOffset = LD->getSrcValueOffset();
7322    SrcValueAlign = LD->getOriginalAlignment();
7323    TBAAInfo = LD->getTBAAInfo();
7324    return true;
7325  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
7326    Ptr = ST->getBasePtr();
7327    Size = ST->getMemoryVT().getSizeInBits() >> 3;
7328    SrcValue = ST->getSrcValue();
7329    SrcValueOffset = ST->getSrcValueOffset();
7330    SrcValueAlign = ST->getOriginalAlignment();
7331    TBAAInfo = ST->getTBAAInfo();
7332  } else {
7333    llvm_unreachable("FindAliasInfo expected a memory operand");
7334  }
7335
7336  return false;
7337}
7338
7339/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
7340/// looking for aliasing nodes and adding them to the Aliases vector.
7341void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
7342                                   SmallVector<SDValue, 8> &Aliases) {
7343  SmallVector<SDValue, 8> Chains;     // List of chains to visit.
7344  SmallPtrSet<SDNode *, 16> Visited;  // Visited node set.
7345
7346  // Get alias information for node.
7347  SDValue Ptr;
7348  int64_t Size;
7349  const Value *SrcValue;
7350  int SrcValueOffset;
7351  unsigned SrcValueAlign;
7352  const MDNode *SrcTBAAInfo;
7353  bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset,
7354                              SrcValueAlign, SrcTBAAInfo);
7355
7356  // Starting off.
7357  Chains.push_back(OriginalChain);
7358  unsigned Depth = 0;
7359
7360  // Look at each chain and determine if it is an alias.  If so, add it to the
7361  // aliases list.  If not, then continue up the chain looking for the next
7362  // candidate.
7363  while (!Chains.empty()) {
7364    SDValue Chain = Chains.back();
7365    Chains.pop_back();
7366
7367    // For TokenFactor nodes, look at each operand and only continue up the
7368    // chain until we find two aliases.  If we've seen two aliases, assume we'll
7369    // find more and revert to original chain since the xform is unlikely to be
7370    // profitable.
7371    //
7372    // FIXME: The depth check could be made to return the last non-aliasing
7373    // chain we found before we hit a tokenfactor rather than the original
7374    // chain.
7375    if (Depth > 6 || Aliases.size() == 2) {
7376      Aliases.clear();
7377      Aliases.push_back(OriginalChain);
7378      break;
7379    }
7380
7381    // Don't bother if we've been before.
7382    if (!Visited.insert(Chain.getNode()))
7383      continue;
7384
7385    switch (Chain.getOpcode()) {
7386    case ISD::EntryToken:
7387      // Entry token is ideal chain operand, but handled in FindBetterChain.
7388      break;
7389
7390    case ISD::LOAD:
7391    case ISD::STORE: {
7392      // Get alias information for Chain.
7393      SDValue OpPtr;
7394      int64_t OpSize;
7395      const Value *OpSrcValue;
7396      int OpSrcValueOffset;
7397      unsigned OpSrcValueAlign;
7398      const MDNode *OpSrcTBAAInfo;
7399      bool IsOpLoad = FindAliasInfo(Chain.getNode(), OpPtr, OpSize,
7400                                    OpSrcValue, OpSrcValueOffset,
7401                                    OpSrcValueAlign,
7402                                    OpSrcTBAAInfo);
7403
7404      // If chain is alias then stop here.
7405      if (!(IsLoad && IsOpLoad) &&
7406          isAlias(Ptr, Size, SrcValue, SrcValueOffset, SrcValueAlign,
7407                  SrcTBAAInfo,
7408                  OpPtr, OpSize, OpSrcValue, OpSrcValueOffset,
7409                  OpSrcValueAlign, OpSrcTBAAInfo)) {
7410        Aliases.push_back(Chain);
7411      } else {
7412        // Look further up the chain.
7413        Chains.push_back(Chain.getOperand(0));
7414        ++Depth;
7415      }
7416      break;
7417    }
7418
7419    case ISD::TokenFactor:
7420      // We have to check each of the operands of the token factor for "small"
7421      // token factors, so we queue them up.  Adding the operands to the queue
7422      // (stack) in reverse order maintains the original order and increases the
7423      // likelihood that getNode will find a matching token factor (CSE.)
7424      if (Chain.getNumOperands() > 16) {
7425        Aliases.push_back(Chain);
7426        break;
7427      }
7428      for (unsigned n = Chain.getNumOperands(); n;)
7429        Chains.push_back(Chain.getOperand(--n));
7430      ++Depth;
7431      break;
7432
7433    default:
7434      // For all other instructions we will just have to take what we can get.
7435      Aliases.push_back(Chain);
7436      break;
7437    }
7438  }
7439}
7440
7441/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking
7442/// for a better chain (aliasing node.)
7443SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
7444  SmallVector<SDValue, 8> Aliases;  // Ops for replacing token factor.
7445
7446  // Accumulate all the aliases to this node.
7447  GatherAllAliases(N, OldChain, Aliases);
7448
7449  if (Aliases.size() == 0) {
7450    // If no operands then chain to entry token.
7451    return DAG.getEntryNode();
7452  } else if (Aliases.size() == 1) {
7453    // If a single operand then chain to it.  We don't need to revisit it.
7454    return Aliases[0];
7455  }
7456
7457  // Construct a custom tailored token factor.
7458  return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
7459                     &Aliases[0], Aliases.size());
7460}
7461
7462// SelectionDAG::Combine - This is the entry point for the file.
7463//
7464void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA,
7465                           CodeGenOpt::Level OptLevel) {
7466  // Create a DAGCombiner instance and run it over the DAG at the
7467  // requested combine level.
7468  DAGCombiner(*this, AA, OptLevel).Run(Level);
7469}
7470