Reassociate.cpp revision baf3c404409d5e47b13984a7f95bfbd6d1f2e79e
//===- Reassociate.cpp - Reassociate binary expressions ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates commutative expressions in an order that is designed
// to promote better constant propagation, GCSE, LICM, PRE...
//
// For example: 4 + (x + 5) -> x + (4 + 5)
//
// In the implementation of this algorithm, constants are assigned rank = 0,
// function arguments are rank = 1, and other values are assigned ranks
// corresponding to the reverse post order traversal of the current function
// (starting at 2), which effectively gives values in deep loops higher rank
// than values not in loops.
//
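// As a small, hypothetical illustration of the scheme: for
//
//   t1 = x + 5
//   t2 = 4 + t1
//
// the constants 4 and 5 get rank 0, the argument x gets a low argument rank,
// and each add ranks just above its highest-ranked operand.  Sorting the
// linearized operand list by rank therefore places the constants next to each
// other at the end of the list, so the tree can be rewritten as x + (4 + 5)
// and the constant part folded away.
//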
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "reassociate"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
#include <map>
using namespace llvm;

STATISTIC(NumLinear , "Number of insts linearized");
STATISTIC(NumChanged, "Number of insts reassociated");
STATISTIC(NumAnnihil, "Number of expr tree annihilated");
STATISTIC(NumFactor , "Number of multiplies factored");

namespace {
  struct VISIBILITY_HIDDEN ValueEntry {
    unsigned Rank;
    Value *Op;
    ValueEntry(unsigned R, Value *O) : Rank(R), Op(O) {}
  };
  inline bool operator<(const ValueEntry &LHS, const ValueEntry &RHS) {
    return LHS.Rank > RHS.Rank;   // Sort so that highest rank goes to start.
  }
}

#ifndef NDEBUG
/// PrintOps - Print out the expression identified in the Ops list.
///
static void PrintOps(Instruction *I, const std::vector<ValueEntry> &Ops) {
  Module *M = I->getParent()->getParent()->getParent();
  cerr << Instruction::getOpcodeName(I->getOpcode()) << " "
       << *Ops[0].Op->getType();
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    WriteAsOperand(*cerr.stream() << " ", Ops[i].Op, false, M);
    cerr << "," << Ops[i].Rank;
  }
}
#endif

namespace {
  class VISIBILITY_HIDDEN Reassociate : public FunctionPass {
    std::map<BasicBlock*, unsigned> RankMap;
    std::map<AssertingVH<>, unsigned> ValueRankMap;
    bool MadeChange;
  public:
    static char ID; // Pass identification, replacement for typeid
    Reassociate() : FunctionPass(&ID) {}

    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
    }
  private:
    void BuildRankMap(Function &F);
    unsigned getRank(Value *V);
    void ReassociateExpression(BinaryOperator *I);
    void RewriteExprTree(BinaryOperator *I, std::vector<ValueEntry> &Ops,
                         unsigned Idx = 0);
    Value *OptimizeExpression(BinaryOperator *I, std::vector<ValueEntry> &Ops);
    void LinearizeExprTree(BinaryOperator *I, std::vector<ValueEntry> &Ops);
    void LinearizeExpr(BinaryOperator *I);
    Value *RemoveFactorFromExpression(Value *V, Value *Factor);
    void ReassociateBB(BasicBlock *BB);

    void RemoveDeadBinaryOp(Value *V);
  };
}

char Reassociate::ID = 0;
static RegisterPass<Reassociate> X("reassociate", "Reassociate expressions");

// Public interface to the Reassociate pass
FunctionPass *llvm::createReassociatePass() { return new Reassociate(); }

void Reassociate::RemoveDeadBinaryOp(Value *V) {
  Instruction *Op = dyn_cast<Instruction>(V);
  if (!Op || (!isa<BinaryOperator>(Op) && !isa<CmpInst>(Op)) ||
      !Op->use_empty())
    return;

  // Erase the dead instruction itself, then clean up any operands that just
  // became dead as a result.
  Value *LHS = Op->getOperand(0), *RHS = Op->getOperand(1);
  ValueRankMap.erase(Op);
  Op->eraseFromParent();
  RemoveDeadBinaryOp(LHS);
  RemoveDeadBinaryOp(RHS);
}


static bool isUnmovableInstruction(Instruction *I) {
  if (I->getOpcode() == Instruction::PHI ||
      I->getOpcode() == Instruction::Alloca ||
      I->getOpcode() == Instruction::Load ||
      I->getOpcode() == Instruction::Malloc ||
      I->getOpcode() == Instruction::Invoke ||
      (I->getOpcode() == Instruction::Call &&
       !isa<DbgInfoIntrinsic>(I)) ||
      I->getOpcode() == Instruction::UDiv ||
      I->getOpcode() == Instruction::SDiv ||
      I->getOpcode() == Instruction::FDiv ||
      I->getOpcode() == Instruction::URem ||
      I->getOpcode() == Instruction::SRem ||
      I->getOpcode() == Instruction::FRem)
    return true;
  return false;
}

void Reassociate::BuildRankMap(Function &F) {
  unsigned i = 2;

  // Assign distinct ranks to function arguments
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
    ValueRankMap[&*I] = ++i;

  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
         E = RPOT.end(); I != E; ++I) {
    BasicBlock *BB = *I;
    unsigned BBRank = RankMap[BB] = ++i << 16;

    // Walk the basic block, adding precomputed ranks for any instructions that
    // we cannot move.  This ensures that the ranks for these instructions are
    // all different in the block.
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (isUnmovableInstruction(I))
        ValueRankMap[&*I] = ++BBRank;
  }
}

unsigned Reassociate::getRank(Value *V) {
  if (isa<Argument>(V)) return ValueRankMap[V];   // Function argument...

  Instruction *I = dyn_cast<Instruction>(V);
  if (I == 0) return 0;  // Otherwise it's a global or constant, rank 0.

  unsigned &CachedRank = ValueRankMap[I];
  if (CachedRank) return CachedRank;    // Rank already known?

  // If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that
  // we can reassociate expressions for code motion!  Since we do not recurse
  // for PHI nodes, we cannot have infinite recursion here, because there
  // cannot be loops in the value graph that do not go through PHI nodes.
  unsigned Rank = 0, MaxRank = RankMap[I->getParent()];
  for (unsigned i = 0, e = I->getNumOperands();
       i != e && Rank != MaxRank; ++i)
    Rank = std::max(Rank, getRank(I->getOperand(i)));

  // If this is a not or neg instruction, do not count it for rank.  This
  // assures us that X and ~X will have the same rank.
  if (!I->getType()->isInteger() ||
      (!BinaryOperator::isNot(I) && !BinaryOperator::isNeg(I)))
    ++Rank;

  //DOUT << "Calculated Rank[" << V->getName() << "] = "
  //     << Rank << "\n";

  return CachedRank = Rank;
}

/// isReassociableOp - Return V as a BinaryOperator if it is an instruction of
/// the specified opcode with at most one use, and null otherwise.
static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
  if ((V->hasOneUse() || V->use_empty()) && isa<Instruction>(V) &&
      cast<Instruction>(V)->getOpcode() == Opcode)
    return cast<BinaryOperator>(V);
  return 0;
}

/// LowerNegateToMultiply - Replace 0-X with X*-1.
///
static Instruction *LowerNegateToMultiply(Instruction *Neg,
                              std::map<AssertingVH<>, unsigned> &ValueRankMap,
                              LLVMContext &Context) {
  Constant *Cst = Neg->getContext().getAllOnesValue(Neg->getType());

  Instruction *Res = BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "",Neg);
  ValueRankMap.erase(Neg);
  Res->takeName(Neg);
  Neg->replaceAllUsesWith(Res);
  Neg->eraseFromParent();
  return Res;
}

// Given an expression of the form '(A+B)+(D+C)', turn it into '(((A+B)+C)+D)'.
// Note that if D is also part of the expression tree, we recurse to linearize
// it as well.  Besides that case, this does not recurse into A, B, or C.
void Reassociate::LinearizeExpr(BinaryOperator *I) {
  BinaryOperator *LHS = cast<BinaryOperator>(I->getOperand(0));
  BinaryOperator *RHS = cast<BinaryOperator>(I->getOperand(1));
  assert(isReassociableOp(LHS, I->getOpcode()) &&
         isReassociableOp(RHS, I->getOpcode()) &&
         "Not an expression that needs linearization?");

  DOUT << "Linear" << *LHS << *RHS << *I;

  // Move the RHS instruction to live immediately before I, avoiding breaking
  // dominator properties.
  RHS->moveBefore(I);

  // Move operands around to do the linearization.
  I->setOperand(1, RHS->getOperand(0));
  RHS->setOperand(0, LHS);
  I->setOperand(0, RHS);

  ++NumLinear;
  MadeChange = true;
  DOUT << "Linearized: " << *I;

  // If D is part of this expression tree, tail recurse.
  if (isReassociableOp(I->getOperand(1), I->getOpcode()))
    LinearizeExpr(I);
}


/// LinearizeExprTree - Given an associative binary expression tree, traverse
/// all of the uses putting it into canonical form.  This forces a left-linear
/// form of the expression (((a+b)+c)+d), and collects information about the
/// rank of the non-tree operands.
///
/// NOTE: This intentionally destroys the expression tree operands (turning
/// them into undef values) to reduce #uses of the values.  This means that the
/// caller MUST use something like RewriteExprTree to put the values back in.
///
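/// As a hypothetical example, for t = (a + b) + (c + d) this first rewrites
/// the tree into the left-linear form (((a + b) + c) + d), then fills Ops
/// with the leaf operands and their ranks, roughly
///   { (rank(a),a), (rank(b),b), (rank(c),c), (rank(d),d) },
/// replacing each leaf operand in the tree with undef until RewriteExprTree
/// puts the (possibly reordered and optimized) leaves back.
///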
void Reassociate::LinearizeExprTree(BinaryOperator *I,
                                    std::vector<ValueEntry> &Ops) {
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  unsigned Opcode = I->getOpcode();
  LLVMContext &Context = I->getContext();

  // First step, linearize the expression if it is in ((A+B)+(C+D)) form.
  BinaryOperator *LHSBO = isReassociableOp(LHS, Opcode);
  BinaryOperator *RHSBO = isReassociableOp(RHS, Opcode);

  // If this is a multiply expression tree and it contains internal negations,
  // transform them into multiplies by -1 so they can be reassociated.
  if (I->getOpcode() == Instruction::Mul) {
    if (!LHSBO && LHS->hasOneUse() && BinaryOperator::isNeg(LHS)) {
      LHS = LowerNegateToMultiply(cast<Instruction>(LHS),
                                  ValueRankMap, Context);
      LHSBO = isReassociableOp(LHS, Opcode);
    }
    if (!RHSBO && RHS->hasOneUse() && BinaryOperator::isNeg(RHS)) {
      RHS = LowerNegateToMultiply(cast<Instruction>(RHS),
                                  ValueRankMap, Context);
      RHSBO = isReassociableOp(RHS, Opcode);
    }
  }

  if (!LHSBO) {
    if (!RHSBO) {
      // Neither the LHS nor the RHS is part of the tree, thus this is a leaf.
      // As such, just remember these operands and their rank.
      Ops.push_back(ValueEntry(getRank(LHS), LHS));
      Ops.push_back(ValueEntry(getRank(RHS), RHS));

      // Clear the leaves out.
      I->setOperand(0, Context.getUndef(I->getType()));
      I->setOperand(1, Context.getUndef(I->getType()));
      return;
    } else {
      // Turn X+(Y+Z) -> (Y+Z)+X
      std::swap(LHSBO, RHSBO);
      std::swap(LHS, RHS);
      bool Success = !I->swapOperands();
      assert(Success && "swapOperands failed");
      Success = false;
      MadeChange = true;
    }
  } else if (RHSBO) {
    // Turn (A+B)+(C+D) -> (((A+B)+C)+D).  This guarantees that the RHS is not
    // part of the expression tree.
    LinearizeExpr(I);
    LHS = LHSBO = cast<BinaryOperator>(I->getOperand(0));
    RHS = I->getOperand(1);
    RHSBO = 0;
  }

  // Okay, now we know that the LHS is a nested expression and that the RHS is
  // not.  Perform reassociation.
  assert(!isReassociableOp(RHS, Opcode) && "LinearizeExpr failed!");

  // Move LHS right before I to make sure that the tree expression dominates all
  // values.
  LHSBO->moveBefore(I);

  // Linearize the expression tree on the LHS.
  LinearizeExprTree(LHSBO, Ops);

  // Remember the RHS operand and its rank.
  Ops.push_back(ValueEntry(getRank(RHS), RHS));

  // Clear the RHS leaf out.
  I->setOperand(1, Context.getUndef(I->getType()));
}

// RewriteExprTree - Now that the operands for this expression tree are
// linearized and optimized, emit them in-order.  This function is written to be
// tail recursive.
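//
// As a hypothetical sketch: with the left-linear chain I = ((? + ?) + ?) and
// a sorted Ops list { a, b, c } (a has the highest rank), this sets the
// root's RHS to a, then recurses into the inner add and sets its operands to
// b and c, producing ((b + c) + a).  Low-rank values such as constants end up
// deepest in the tree, where they can be folded or hoisted.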
void Reassociate::RewriteExprTree(BinaryOperator *I,
                                  std::vector<ValueEntry> &Ops,
                                  unsigned i) {
  if (i+2 == Ops.size()) {
    if (I->getOperand(0) != Ops[i].Op ||
        I->getOperand(1) != Ops[i+1].Op) {
      Value *OldLHS = I->getOperand(0);
      DOUT << "RA: " << *I;
      I->setOperand(0, Ops[i].Op);
      I->setOperand(1, Ops[i+1].Op);
      DOUT << "TO: " << *I;
      MadeChange = true;
      ++NumChanged;

      // If we reassociated a tree to fewer operands (e.g. (1+a+2) -> (a+3)),
      // delete the extra, now dead, nodes.
      RemoveDeadBinaryOp(OldLHS);
    }
    return;
  }
  assert(i+2 < Ops.size() && "Ops index out of range!");

  if (I->getOperand(1) != Ops[i].Op) {
    DOUT << "RA: " << *I;
    I->setOperand(1, Ops[i].Op);
    DOUT << "TO: " << *I;
    MadeChange = true;
    ++NumChanged;
  }

  BinaryOperator *LHS = cast<BinaryOperator>(I->getOperand(0));
  assert(LHS->getOpcode() == I->getOpcode() &&
         "Improper expression tree!");

  // Compactify the tree instructions together with each other to guarantee
  // that the expression tree is dominated by all of Ops.
  LHS->moveBefore(I);
  RewriteExprTree(LHS, Ops, i+1);
}



// NegateValue - Insert instructions before the instruction pointed to by BI
// that compute the negative version of the value specified.  The negative
// version of the value is returned, and BI is left pointing at the instruction
// that should be processed next by the reassociation pass.
//
static Value *NegateValue(LLVMContext &Context, Value *V, Instruction *BI) {
  // We are trying to expose opportunity for reassociation.  One of the things
  // that we want to do to achieve this is to push a negation as deep into an
  // expression chain as possible, to expose the add instructions.  In practice,
  // this means that we turn this:
  //   X = -(A+12+C+D)   into    X = -A + -12 + -C + -D = -12 + -A + -C + -D
  // so that a later expression like Y = 12+X can be reassociated with the -12
  // to eliminate the constants.  We assume that instcombine will clean up the
  // mess later if we introduce tons of unnecessary negation instructions...
  //
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (I->getOpcode() == Instruction::Add && I->hasOneUse()) {
      // Push the negates through the add.
      I->setOperand(0, NegateValue(Context, I->getOperand(0), BI));
      I->setOperand(1, NegateValue(Context, I->getOperand(1), BI));

      // We must move the add instruction here, because the neg instructions do
      // not dominate the old add instruction in general.  By moving it, we are
      // assured that the neg instructions we just inserted dominate the
      // instruction we are about to insert after them.
      //
      I->moveBefore(BI);
      I->setName(I->getName()+".neg");
      return I;
    }

  // Insert a 'neg' instruction that subtracts the value from zero to get the
  // negation.
  //
  return BinaryOperator::CreateNeg(Context, V, V->getName() + ".neg", BI);
}

/// ShouldBreakUpSubtract - Return true if we should break up this subtract of
/// X-Y into (X + -Y).
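///
/// For example (hypothetical IR), in
///   %t = sub i32 %x, %y
///   %u = add i32 %t, %z
/// the subtract's only use is a reassociable add, so it is worth rewriting it
/// as %x + (-%y) and letting the add tree absorb the negation; a plain negate
/// such as 0 - %y is left alone.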
static bool ShouldBreakUpSubtract(LLVMContext &Context, Instruction *Sub) {
  // If this is a negation, we can't split it up!
  if (BinaryOperator::isNeg(Sub))
    return false;

  // Don't bother to break this up unless either the LHS or RHS is a
  // reassociable add or subtract, or the subtract's only use is a reassociable
  // add or subtract.
  if (isReassociableOp(Sub->getOperand(0), Instruction::Add) ||
      isReassociableOp(Sub->getOperand(0), Instruction::Sub))
    return true;
  if (isReassociableOp(Sub->getOperand(1), Instruction::Add) ||
      isReassociableOp(Sub->getOperand(1), Instruction::Sub))
    return true;
  if (Sub->hasOneUse() &&
      (isReassociableOp(Sub->use_back(), Instruction::Add) ||
       isReassociableOp(Sub->use_back(), Instruction::Sub)))
    return true;

  return false;
}

/// BreakUpSubtract - If we have (X-Y), and if either X is an add, or if this is
/// only used by an add, transform this into (X+(0-Y)) to promote better
/// reassociation.
static Instruction *BreakUpSubtract(LLVMContext &Context, Instruction *Sub,
                              std::map<AssertingVH<>, unsigned> &ValueRankMap) {
  // Convert a subtract into an add and a neg instruction... so that sub
  // instructions can be commuted with other add instructions.
  //
  // Calculate the negative value of Operand 1 of the sub instruction, and set
  // it as the RHS of the add instruction we are about to make.
  //
  Value *NegVal = NegateValue(Context, Sub->getOperand(1), Sub);
  Instruction *New =
    BinaryOperator::CreateAdd(Sub->getOperand(0), NegVal, "", Sub);
  New->takeName(Sub);

  // Everyone now refers to the add instruction.
  ValueRankMap.erase(Sub);
  Sub->replaceAllUsesWith(New);
  Sub->eraseFromParent();

  DOUT << "Negated: " << *New;
  return New;
}

/// ConvertShiftToMul - If an operand of this shift is a reassociable multiply,
/// or if the shift is only used by a reassociable multiply or add, change it
/// into a multiply by a constant to assist with further reassociation.
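///
/// For example (hypothetical IR), if %s = shl i32 %x, 3 only feeds a
/// reassociable multiply, it is rewritten as %s = mul i32 %x, 8 so that the
/// power-of-two factor participates in the multiply tree like any other
/// operand.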
static Instruction *ConvertShiftToMul(Instruction *Shl,
                              std::map<AssertingVH<>, unsigned> &ValueRankMap,
                              LLVMContext &Context) {
  // If an operand of this shift is a reassociable multiply, or if the shift
  // is used by a reassociable multiply or add, turn into a multiply.
  if (isReassociableOp(Shl->getOperand(0), Instruction::Mul) ||
      (Shl->hasOneUse() &&
       (isReassociableOp(Shl->use_back(), Instruction::Mul) ||
        isReassociableOp(Shl->use_back(), Instruction::Add)))) {
    Constant *MulCst = ConstantInt::get(Shl->getType(), 1);
    MulCst =
        ConstantExpr::getShl(MulCst, cast<Constant>(Shl->getOperand(1)));

    Instruction *Mul = BinaryOperator::CreateMul(Shl->getOperand(0), MulCst,
                                                 "", Shl);
    ValueRankMap.erase(Shl);
    Mul->takeName(Shl);
    Shl->replaceAllUsesWith(Mul);
    Shl->eraseFromParent();
    return Mul;
  }
  return 0;
}

// Scan backwards and forwards among values with the same rank as element i to
// see if X exists.  If X does not exist, return i.
static unsigned FindInOperandList(std::vector<ValueEntry> &Ops, unsigned i,
                                  Value *X) {
  unsigned XRank = Ops[i].Rank;
  unsigned e = Ops.size();
  for (unsigned j = i+1; j != e && Ops[j].Rank == XRank; ++j)
    if (Ops[j].Op == X)
      return j;
  // Scan backwards
  for (unsigned j = i-1; j != ~0U && Ops[j].Rank == XRank; --j)
    if (Ops[j].Op == X)
      return j;
  return i;
}

/// EmitAddTreeOfValues - Emit a tree of add instructions, summing Ops together
/// and returning the result.  Insert the tree before I.
static Value *EmitAddTreeOfValues(Instruction *I, std::vector<Value*> &Ops) {
  if (Ops.size() == 1) return Ops.back();

  Value *V1 = Ops.back();
  Ops.pop_back();
  Value *V2 = EmitAddTreeOfValues(I, Ops);
  return BinaryOperator::CreateAdd(V2, V1, "tmp", I);
}

/// RemoveFactorFromExpression - If V is an expression tree that is a
/// multiplication sequence, and if this sequence contains a multiply by Factor,
/// remove Factor from the tree and return the new tree.
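///
/// For example (hypothetical operands), removing the factor A from the
/// multiply tree A*B*C leaves the tree B*C; if the requested factor does not
/// occur in the tree, the tree is restored unchanged and null is returned.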
Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
  BinaryOperator *BO = isReassociableOp(V, Instruction::Mul);
  if (!BO) return 0;

  std::vector<ValueEntry> Factors;
  LinearizeExprTree(BO, Factors);

  bool FoundFactor = false;
  for (unsigned i = 0, e = Factors.size(); i != e; ++i)
    if (Factors[i].Op == Factor) {
      FoundFactor = true;
      Factors.erase(Factors.begin()+i);
      break;
    }
  if (!FoundFactor) {
    // Make sure to restore the operands to the expression tree.
    RewriteExprTree(BO, Factors);
    return 0;
  }

  if (Factors.size() == 1) return Factors[0].Op;

  RewriteExprTree(BO, Factors);
  return BO;
}

/// FindSingleUseMultiplyFactors - If V is a single-use multiply, recursively
/// add its operands as factors, otherwise add V to the list of factors.
static void FindSingleUseMultiplyFactors(Value *V,
                                         std::vector<Value*> &Factors) {
  BinaryOperator *BO;
  if ((!V->hasOneUse() && !V->use_empty()) ||
      !(BO = dyn_cast<BinaryOperator>(V)) ||
      BO->getOpcode() != Instruction::Mul) {
    Factors.push_back(V);
    return;
  }

  // Otherwise, add the LHS and RHS to the list of factors.
  FindSingleUseMultiplyFactors(BO->getOperand(1), Factors);
  FindSingleUseMultiplyFactors(BO->getOperand(0), Factors);
}



Value *Reassociate::OptimizeExpression(BinaryOperator *I,
                                       std::vector<ValueEntry> &Ops) {
  // Now that we have the linearized expression tree, try to optimize it.
  // Start by folding any constants that we found.
  bool IterateOptimization = false;
  if (Ops.size() == 1) return Ops[0].Op;

  LLVMContext &Context = I->getContext();

  unsigned Opcode = I->getOpcode();

  if (Constant *V1 = dyn_cast<Constant>(Ops[Ops.size()-2].Op))
    if (Constant *V2 = dyn_cast<Constant>(Ops.back().Op)) {
      Ops.pop_back();
      Ops.back().Op = ConstantExpr::get(Opcode, V1, V2);
      return OptimizeExpression(I, Ops);
    }

  // Check for destructive annihilation due to a constant being used.
  if (ConstantInt *CstVal = dyn_cast<ConstantInt>(Ops.back().Op))
    switch (Opcode) {
    default: break;
    case Instruction::And:
      if (CstVal->isZero()) {                // ... & 0 -> 0
        ++NumAnnihil;
        return CstVal;
      } else if (CstVal->isAllOnesValue()) { // ... & -1 -> ...
        Ops.pop_back();
      }
      break;
    case Instruction::Mul:
      if (CstVal->isZero()) {                // ... * 0 -> 0
        ++NumAnnihil;
        return CstVal;
      } else if (cast<ConstantInt>(CstVal)->isOne()) {
        Ops.pop_back();                      // ... * 1 -> ...
      }
      break;
    case Instruction::Or:
      if (CstVal->isAllOnesValue()) {        // ... | -1 -> -1
        ++NumAnnihil;
        return CstVal;
      }
      // FALLTHROUGH!
    case Instruction::Add:
    case Instruction::Xor:
      if (CstVal->isZero())                  // ... [|^+] 0 -> ...
        Ops.pop_back();
      break;
    }
  if (Ops.size() == 1) return Ops[0].Op;

  // Handle destructive annihilation due to identities between elements in the
  // argument list here.
  switch (Opcode) {
  default: break;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // Scan the operand lists looking for X and ~X pairs, along with X,X pairs.
    // If we find any, we can simplify the expression. X&~X == 0, X|~X == -1.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      // First, check for X and ~X in the operand list.
      assert(i < Ops.size());
      if (BinaryOperator::isNot(Ops[i].Op)) {    // Cannot occur for ^.
        Value *X = BinaryOperator::getNotArgument(Ops[i].Op);
        unsigned FoundX = FindInOperandList(Ops, i, X);
        if (FoundX != i) {
          if (Opcode == Instruction::And) {   // ...&X&~X = 0
            ++NumAnnihil;
            return Context.getNullValue(X->getType());
          } else if (Opcode == Instruction::Or) {   // ...|X|~X = -1
            ++NumAnnihil;
            return Context.getAllOnesValue(X->getType());
          }
        }
      }

      // Next, check for duplicate pairs of values, which we assume are next to
      // each other, due to our sorting criteria.
      assert(i < Ops.size());
      if (i+1 != Ops.size() && Ops[i+1].Op == Ops[i].Op) {
        if (Opcode == Instruction::And || Opcode == Instruction::Or) {
          // Drop duplicate values.
          Ops.erase(Ops.begin()+i);
          --i; --e;
          IterateOptimization = true;
          ++NumAnnihil;
        } else {
          assert(Opcode == Instruction::Xor);
          if (e == 2) {
            ++NumAnnihil;
            return Context.getNullValue(Ops[0].Op->getType());
          }
          // ... X^X -> ...
          Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
          i -= 1; e -= 2;
          IterateOptimization = true;
          ++NumAnnihil;
        }
      }
    }
    break;

  case Instruction::Add:
    // Scan the operand lists looking for X and -X pairs.  If we find any, we
    // can simplify the expression. X+-X == 0.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      assert(i < Ops.size());
      // Check for X and -X in the operand list.
      if (BinaryOperator::isNeg(Ops[i].Op)) {
        Value *X = BinaryOperator::getNegArgument(Ops[i].Op);
        unsigned FoundX = FindInOperandList(Ops, i, X);
        if (FoundX != i) {
          // Remove X and -X from the operand list.
          if (Ops.size() == 2) {
            ++NumAnnihil;
            return Context.getNullValue(X->getType());
          } else {
            Ops.erase(Ops.begin()+i);
            if (i < FoundX)
              --FoundX;
            else
              --i;   // Need to back up an extra one.
            Ops.erase(Ops.begin()+FoundX);
            IterateOptimization = true;
            ++NumAnnihil;
            --i;     // Revisit element.
            e -= 2;  // Removed two elements.
          }
        }
      }
    }


    // Scan the operand list, checking to see if there are any common factors
    // between operands.  Consider something like A*A+A*B*C+D.  We would like to
    // reassociate this to A*(A+B*C)+D, which reduces the number of multiplies.
    // To efficiently find this, we count the number of times a factor occurs
    // for any ADD operands that are MULs.
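    // Continuing the hypothetical A*A+A*B*C+D example: A*A contributes a
    // single count for A (A*A is deliberately not double counted), A*B*C
    // contributes one count each for A, B and C, and D is not a multiply, so
    // A ends up with the highest occurrence count (2) and is the factor
    // pulled out below.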
    std::map<Value*, unsigned> FactorOccurrences;
    unsigned MaxOcc = 0;
    Value *MaxOccVal = 0;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(Ops[i].Op)) {
        if (BOp->getOpcode() == Instruction::Mul && BOp->use_empty()) {
          // Compute all of the factors of this added value.
          std::vector<Value*> Factors;
          FindSingleUseMultiplyFactors(BOp, Factors);
          assert(Factors.size() > 1 && "Bad linearize!");

          // Add one to FactorOccurrences for each unique factor in this op.
          if (Factors.size() == 2) {
            unsigned Occ = ++FactorOccurrences[Factors[0]];
            if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factors[0]; }
            if (Factors[0] != Factors[1]) {   // Don't double count A*A.
              Occ = ++FactorOccurrences[Factors[1]];
              if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factors[1]; }
            }
          } else {
            std::set<Value*> Duplicates;
            for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
              if (Duplicates.insert(Factors[i]).second) {
                unsigned Occ = ++FactorOccurrences[Factors[i]];
                if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factors[i]; }
              }
            }
          }
        }
      }
    }

    // If any factor occurred more than one time, we can pull it out.
    if (MaxOcc > 1) {
      DOUT << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal << "\n";

      // Create a new instruction that uses MaxOccVal twice.  If we don't do
      // this, removing a factor from an expression could drop a use of
      // MaxOccVal, and that can cause RemoveFactorFromExpression on successive
      // values to behave differently.
      Instruction *DummyInst = BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal);
      std::vector<Value*> NewMulOps;
      for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
        if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) {
          NewMulOps.push_back(V);
          Ops.erase(Ops.begin()+i);
          --i; --e;
        }
      }

      // No need for extra uses anymore.
      delete DummyInst;

      unsigned NumAddedValues = NewMulOps.size();
      Value *V = EmitAddTreeOfValues(I, NewMulOps);
      Value *V2 = BinaryOperator::CreateMul(V, MaxOccVal, "tmp", I);

      // Now that we have inserted V and its sole use, optimize it. This allows
      // us to handle cases that require multiple factoring steps, such as this:
      // A*A*B + A*A*C   -->   A*(A*B+A*C)   -->   A*(A*(B+C))
      if (NumAddedValues > 1)
        ReassociateExpression(cast<BinaryOperator>(V));

      ++NumFactor;

      if (Ops.empty())
        return V2;

      // Add the new value to the list of things being added.
      Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2));

      // Rewrite the tree so that there is now a use of V.
      RewriteExprTree(I, Ops);
      return OptimizeExpression(I, Ops);
    }
    break;
  //case Instruction::Mul:
  }

  if (IterateOptimization)
    return OptimizeExpression(I, Ops);
  return 0;
}


/// ReassociateBB - Inspect all of the instructions in this basic block,
/// reassociating them as we go.
void Reassociate::ReassociateBB(BasicBlock *BB) {
  LLVMContext &Context = BB->getContext();

  for (BasicBlock::iterator BBI = BB->begin(); BBI != BB->end(); ) {
    Instruction *BI = BBI++;
    if (BI->getOpcode() == Instruction::Shl &&
        isa<ConstantInt>(BI->getOperand(1)))
      if (Instruction *NI = ConvertShiftToMul(BI, ValueRankMap, Context)) {
        MadeChange = true;
        BI = NI;
      }

    // Reject cases where it is pointless to do this.
    if (!isa<BinaryOperator>(BI) || BI->getType()->isFloatingPoint() ||
        isa<VectorType>(BI->getType()))
      continue;  // Floating point ops are not associative.

    // If this is a subtract instruction which is not already in negate form,
    // see if we can convert it to X+-Y.
    if (BI->getOpcode() == Instruction::Sub) {
      if (ShouldBreakUpSubtract(Context, BI)) {
        BI = BreakUpSubtract(Context, BI, ValueRankMap);
        MadeChange = true;
      } else if (BinaryOperator::isNeg(BI)) {
        // Otherwise, this is a negation.  See if the operand is a multiply tree
        // and if this is not an inner node of a multiply tree.
        if (isReassociableOp(BI->getOperand(1), Instruction::Mul) &&
            (!BI->hasOneUse() ||
             !isReassociableOp(BI->use_back(), Instruction::Mul))) {
          BI = LowerNegateToMultiply(BI, ValueRankMap, Context);
          MadeChange = true;
        }
      }
    }

    // If this instruction is an associative binary operator, process it.
    if (!BI->isAssociative()) continue;
    BinaryOperator *I = cast<BinaryOperator>(BI);

    // If this is an interior node of a reassociable tree, ignore it until we
    // get to the root of the tree, to avoid N^2 analysis.
    if (I->hasOneUse() && isReassociableOp(I->use_back(), I->getOpcode()))
      continue;

    // If this is an add tree that is used by a sub instruction, ignore it
    // until we process the subtract.
    if (I->hasOneUse() && I->getOpcode() == Instruction::Add &&
        cast<Instruction>(I->use_back())->getOpcode() == Instruction::Sub)
      continue;

    ReassociateExpression(I);
  }
}

void Reassociate::ReassociateExpression(BinaryOperator *I) {

  // First, walk the expression tree, linearizing the tree and collecting the
  // operand list.
  std::vector<ValueEntry> Ops;
  LinearizeExprTree(I, Ops);

  DOUT << "RAIn:\t"; DEBUG(PrintOps(I, Ops)); DOUT << "\n";

  // Now that we have linearized the tree to a list and have gathered all of
  // the operands and their ranks, sort the operands by their rank.  Use a
  // stable_sort so that values with equal ranks will have their relative
  // positions maintained (and so the compiler is deterministic).  Note that
  // this sorts so that the highest ranking values end up at the beginning of
  // the vector.
  std::stable_sort(Ops.begin(), Ops.end());

  // OptimizeExpression - Now that we have the expression tree in a convenient
  // sorted form, optimize it globally if possible.
  if (Value *V = OptimizeExpression(I, Ops)) {
    // This expression tree simplified to something that isn't a tree,
    // eliminate it.
    DOUT << "Reassoc to scalar: " << *V << "\n";
    I->replaceAllUsesWith(V);
    RemoveDeadBinaryOp(I);
    return;
  }

  // We want to sink immediates as deeply as possible except in the case where
  // this is a multiply tree used only by an add, and the immediate is a -1.
  // In this case we reassociate to put the negation on the outside so that we
  // can fold the negation into the add: (-X)*Y + Z -> Z-X*Y
  if (I->getOpcode() == Instruction::Mul && I->hasOneUse() &&
      cast<Instruction>(I->use_back())->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(Ops.back().Op) &&
      cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) {
    Ops.insert(Ops.begin(), Ops.back());
    Ops.pop_back();
  }

  DOUT << "RAOut:\t"; DEBUG(PrintOps(I, Ops)); DOUT << "\n";

  if (Ops.size() == 1) {
    // This expression tree simplified to something that isn't a tree,
    // eliminate it.
    I->replaceAllUsesWith(Ops[0].Op);
    RemoveDeadBinaryOp(I);
  } else {
    // Now that we ordered and optimized the expressions, splat them back into
    // the expression tree, removing any unneeded nodes.
    RewriteExprTree(I, Ops);
  }
}


bool Reassociate::runOnFunction(Function &F) {
  // Recalculate the rank map for F
  BuildRankMap(F);

  MadeChange = false;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
    ReassociateBB(FI);

  // We are done with the rank map...
  RankMap.clear();
  ValueRankMap.clear();
  return MadeChange;
}
