//===- AddrModeMatcher.cpp - Addressing mode matching facility --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the target addressing mode matcher class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/CallSite.h"

using namespace llvm;
using namespace llvm::PatternMatch;

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "") << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    WriteAsOperand(OS, BaseReg, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    WriteAsOperand(OS, ScaledReg, /*PrintType=*/false);
    NeedPlus = true;
  }

  OS << ']';
}

void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}


/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
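///
/// For example, consider this illustrative IR fragment (hypothetical names,
/// assuming a 64-bit target where i64 is pointer-sized):
///
///   %idx = shl i64 %i, 2
///   %sum = add i64 %base, %idx
///   %ptr = inttoptr i64 %sum to i32*
///   %v   = load i32* %ptr
///
/// On a target supporting [Base + Reg*Scale] addressing (such as x86), the
/// shl can be folded into the load's mode as ScaledReg = %i, Scale = 4.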
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg*Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
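  //
  // For example (hypothetical values): if ScaleReg is "%x = add i64 %a, 3"
  // and the committed Scale is 4, we can instead use ScaledReg = %a and fold
  // the constant as BaseOffs += 3*4, provided the result is still legal.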
  ConstantInt *CI = 0;
  Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it.  This doesn't need to be perfect, but needs to accept at
/// least the set of instructions that MatchOperationAddr can.
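///
/// For example, an 'add' feeding a load's pointer operand might fold into a
/// [Reg+Reg] or [Reg+Imm] mode, so Add conservatively returns true here even
/// if MatchOperationAddr later rejects the particular use.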
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a no-op, as we know that the int type is pointer
    // sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}


/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode.  If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a no-op, as we know that the int type is pointer
    // sized.
    return MatchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::BitCast:
    // BitCast is always a no-op, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
        MatchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
        MatchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  //case Instruction::Or:
  //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //  break;
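  //  For example (hypothetical): "%p = or i64 %q, 3" behaves exactly like
  //  "add i64 %q, 3" when the low two bits of %q are known zero, e.g.
  //  because %q was produced by "shl i64 %x, 2".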
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
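    // For example, "shl i64 %x, 3" is matched as %x with Scale = 8, and
    // "mul i64 %x, 4" as %x with Scale = 4.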
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP.  We can fold it if it contains only constant offsets and
    // at most one variable offset.
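    //
    // For example, an illustrative GEP in the typed-pointer syntax of this
    // era:
    //
    //   %a = getelementptr [100 x i32]* %arr, i64 0, i64 %i
    //
    // decomposes into base %arr plus %i scaled by 4 (the i32 allocation
    // size); constant indices, including struct field indices, accumulate
    // into ConstantOffset below.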
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const TargetData *TD = TLI.getTargetData();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset.  In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 ||
          TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
        // Check to see if we can fold the base pointer in too.
        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amounts are valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  }
  return false;
}

/// MatchAddr - If we can, try to add the value of 'Addr' into the current
/// addressing mode.  If Addr can't be added to AddrMode this returns false and
/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
/// or intptr_t for the target.
///
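/// The cases handled, in order, are: a ConstantInt (folded into BaseOffs), a
/// GlobalValue (folded into BaseGV), an Instruction or ConstantExpr (handed
/// off to MatchOperationAddr), a null pointer (free to fold), and finally any
/// remaining value used directly as the base or scaled register.
///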
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = 0;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = 0;
  }
  // Couldn't match.
  return false;
}


/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands.  If so, return true, otherwise
/// return false.
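///
/// For example (illustrative only), in
///
///   call void asm sideeffect "prefetcht0 $0", "*m"(i32* %p)
///
/// %p is an indirect memory operand, so an address computation feeding it may
/// still be folded; if %p instead appeared under a register constraint ("r"),
/// it could not be.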
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  TargetLowering::AsmOperandInfoVector TargetConstraints =
    TLI.ParseConstraints(ImmutableCallSite(CI));
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}


/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
/// memory use.  If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
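///
/// For example, if I is an 'add' whose only transitive uses are the pointer
/// operands of two loads, both loads are added to MemoryUses and this returns
/// false; if I instead escapes into a non-asm call, this returns true.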
static bool FindAllMemoryUses(Instruction *I,
                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
                              const TargetLowering &TLI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I))
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    User *U = *UI;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      unsigned opNo = UI.getOperandNo();
      if (opNo == 0) return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(U)) {
      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
                          TLI))
      return true;
  }

  return false;
}


/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
/// the use site that we're folding it into.  If so, there is no cost to
/// include it in the addressing mode.  KnownLive1 and KnownLive2 are two values
/// that we know are live at the instruction already.
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is null or either of the known-live values, we know it is live!
  if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant-sized alloca in the entry block, it is live: it is
  // just a reference to the stack/frame pointer, which is live for the whole
  // function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block.  If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  BasicBlock *MemBB = MemoryInst->getParent();
  for (Value::use_iterator UI = Val->use_begin(), E = Val->use_end();
       UI != E; ++UI)
    // We know that uses of arguments and instructions have to be instructions.
    if (cast<Instruction>(*UI)->getParent() == MemBB)
      return true;

  return false;
}


/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it.  However, the specified instruction has multiple
/// uses.  Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = 0;
  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = 0;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (BaseReg == 0 && ScaledReg == 0)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction.  If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    if (!Address->getType()->isPointerTy())
      return false;
    Type *AddressAccessTy =
      cast<PointerType>(Address->getType())->getElementType();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
                                  MemoryInst, Result);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.MatchAddr(Address, 0);
    (void)Success;
    assert(Success && "Couldn't select *anything*?");

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}