ARMLoadStoreOptimizer.cpp revision 3e15bf33e024b9df9e89351a165acfdb1dde51ed
1//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file was developed by Evan Cheng and is distributed under the
6// University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains a pass that performs load / store related peephole
11// optimizations. This pass should be run after register allocation.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "arm-ldst-opt"
16#include "ARM.h"
17#include "ARMAddressingModes.h"
18#include "ARMMachineFunctionInfo.h"
19#include "ARMRegisterInfo.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/CodeGen/MachineBasicBlock.h"
24#include "llvm/CodeGen/MachineFunctionPass.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/RegisterScavenging.h"
28#include "llvm/Support/Compiler.h"
29#include "llvm/Target/MRegisterInfo.h"
30#include "llvm/Target/TargetInstrInfo.h"
31#include "llvm/Target/TargetMachine.h"
32using namespace llvm;
33
34STATISTIC(NumLDMGened , "Number of ldm instructions generated");
35STATISTIC(NumSTMGened , "Number of stm instructions generated");
36STATISTIC(NumFLDMGened, "Number of fldm instructions generated");
37STATISTIC(NumFSTMGened, "Number of fstm instructions generated");
38
namespace {
  /// ARMLoadStoreOpt - Post-register-allocation pass that merges adjacent
  /// ARM loads / stores into load / store multiple instructions and folds
  /// base-register updates into them.
  struct VISIBILITY_HIDDEN ARMLoadStoreOpt : public MachineFunctionPass {
    static const char ID;  // Pass identification, replacement for typeid.
    ARMLoadStoreOpt() : MachineFunctionPass((intptr_t)&ID) {}

    // Per-function state, cached in runOnMachineFunction.
    const TargetInstrInfo *TII;
    const MRegisterInfo *MRI;
    ARMFunctionInfo *AFI;
    RegScavenger *RS;   // Used to find a scratch register for a new base.

    virtual bool runOnMachineFunction(MachineFunction &Fn);

    virtual const char *getPassName() const {
      return "ARM load / store optimization pass";
    }

  private:
    /// MemOpQueueEntry - One candidate load / store in the current chain:
    /// its immediate offset from the common base register, its position
    /// (instruction index) within the block, an iterator to the instruction,
    /// and whether it has already been merged into a multiple op.
    struct MemOpQueueEntry {
      int Offset;
      unsigned Position;
      MachineBasicBlock::iterator MBBI;
      bool Merged;
      MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
        : Offset(o), Position(p), MBBI(i), Merged(false) {};
    };
    typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
    typedef MemOpQueue::iterator MemOpQueueIter;

    SmallVector<MachineBasicBlock::iterator, 4>
    MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
                 int Opcode, unsigned Size, unsigned Scratch,
                 MemOpQueue &MemOps);

    void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
    bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
    bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
  };
  const char ARMLoadStoreOpt::ID = 0;
}
78
79/// createARMLoadStoreOptimizationPass - returns an instance of the load / store
80/// optimization pass.
81FunctionPass *llvm::createARMLoadStoreOptimizationPass() {
82  return new ARMLoadStoreOpt();
83}
84
85static int getLoadStoreMultipleOpcode(int Opcode) {
86  switch (Opcode) {
87  case ARM::LDR:
88    NumLDMGened++;
89    return ARM::LDM;
90  case ARM::STR:
91    NumSTMGened++;
92    return ARM::STM;
93  case ARM::FLDS:
94    NumFLDMGened++;
95    return ARM::FLDMS;
96  case ARM::FSTS:
97    NumFSTMGened++;
98    return ARM::FSTMS;
99  case ARM::FLDD:
100    NumFLDMGened++;
101    return ARM::FLDMD;
102  case ARM::FSTD:
103    NumFSTMGened++;
104    return ARM::FSTMD;
105  default: abort();
106  }
107  return 0;
108}
109
/// mergeOps - Create and insert a LDM or STM with Base as base register and
/// registers in Regs as the register operands that would be loaded / stored.
/// The new multiple op is inserted before MBBI; the originals are NOT erased
/// here (the caller does that). It returns true if the transformation is done.
static bool mergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     int Offset, unsigned Base, bool BaseKill, int Opcode,
                     unsigned Scratch,
                     SmallVector<std::pair<unsigned, bool>, 8> &Regs,
                     const TargetInstrInfo *TII) {
  // Only a single register to load / store. Don't bother.
  unsigned NumRegs = Regs.size();
  if (NumRegs <= 1)
    return false;

  // Pick the AM4 sub-mode implied by the starting offset: +4 -> increment-
  // before, -4*n+4 -> decrement-after, -4*n -> decrement-before. A zero
  // offset keeps the default increment-after.
  ARM_AM::AMSubMode Mode = ARM_AM::ia;
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  if (isAM4 && Offset == 4)
    Mode = ARM_AM::ib;
  else if (isAM4 && Offset == -4 * (int)NumRegs + 4)
    Mode = ARM_AM::da;
  else if (isAM4 && Offset == -4 * (int)NumRegs)
    Mode = ARM_AM::db;
  else if (Offset != 0) {
    // If starting offset isn't zero, insert a MI to materialize a new base.
    // But only do so if it is cost effective, i.e. merging more than two
    // loads / stores.
    if (NumRegs <= 2)
      return false;

    unsigned NewBase;
    if (Opcode == ARM::LDR)
      // If it is a load, then just use one of the destination register to
      // use as the new base.
      NewBase = Regs[NumRegs-1].first;
    else {
      // Use the scratch register to use as a new base.
      NewBase = Scratch;
      if (NewBase == 0)
        return false;
    }
    int BaseOpc = ARM::ADDri;
    if (Offset < 0) {
      BaseOpc = ARM::SUBri;
      Offset = - Offset;
    }
    // The offset must be encodable as an ARM shifter-operand immediate.
    int ImmedOffset = ARM_AM::getSOImmVal(Offset);
    if (ImmedOffset == -1)
      return false;  // Probably not worth it then.

    BuildMI(MBB, MBBI, TII->get(BaseOpc), NewBase)
      .addReg(Base, false, false, BaseKill).addImm(ImmedOffset);
    Base = NewBase;
    BaseKill = true;  // New base is always killed right after its use.
  }

  // Emit the multiple op: base register + addressing-mode immediate first,
  // then one register operand per merged load / store. The bool in each
  // Regs pair carries the original kill flag.
  bool isDPR = Opcode == ARM::FLDD || Opcode == ARM::FSTD;
  bool isDef = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  Opcode = getLoadStoreMultipleOpcode(Opcode);
  MachineInstrBuilder MIB = (isAM4)
    ? BuildMI(MBB, MBBI, TII->get(Opcode)).addReg(Base, false, false, BaseKill)
        .addImm(ARM_AM::getAM4ModeImm(Mode))
    : BuildMI(MBB, MBBI, TII->get(Opcode)).addReg(Base, false, false, BaseKill)
        .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs));
  for (unsigned i = 0; i != NumRegs; ++i)
    MIB = MIB.addReg(Regs[i].first, isDef, false, Regs[i].second);

  return true;
}
177
/// MergeLDR_STR - Merge a number of load / store instructions into one or more
/// load / store multiple instructions, starting from the queue entry at
/// SIndex. Returns iterators to the generated multiple ops so the caller can
/// try folding base updates into them.
SmallVector<MachineBasicBlock::iterator, 4>
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
                              unsigned Base, int Opcode, unsigned Size,
                              unsigned Scratch, MemOpQueue &MemOps) {
  SmallVector<MachineBasicBlock::iterator, 4> Merges;
  SmallVector<std::pair<unsigned,bool>, 8> Regs;
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  int Offset = MemOps[SIndex].Offset;
  int SOffset = Offset;   // Starting offset of the run being accumulated.
  unsigned Pos = MemOps[SIndex].Position;
  MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI;
  unsigned PReg = MemOps[SIndex].MBBI->getOperand(0).getReg();
  unsigned PRegNum = ARMRegisterInfo::getRegisterNumbering(PReg);
  bool isKill = MemOps[SIndex].MBBI->getOperand(0).isKill();
  Regs.push_back(std::make_pair(PReg, isKill));
  for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
    int NewOffset = MemOps[i].Offset;
    unsigned Reg = MemOps[i].MBBI->getOperand(0).getReg();
    unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
    isKill = MemOps[i].MBBI->getOperand(0).isKill();
    // AM4 - register numbers in ascending order.
    // AM5 - consecutive register numbers in ascending order.
    if (NewOffset == Offset + (int)Size &&
        ((isAM4 && RegNum > PRegNum) || RegNum == PRegNum+1)) {
      // Extends the current run: contiguous offset and legal register order.
      Offset += Size;
      Regs.push_back(std::make_pair(Reg, isKill));
      PRegNum = RegNum;
    } else {
      // Can't merge this in. Try merge the earlier ones first.
      // mergeOps inserts before ++Loc, i.e. just after the latest
      // instruction of the run, so all inputs are available there.
      if (mergeOps(MBB, ++Loc, SOffset, Base, false, Opcode,Scratch,Regs,TII)) {
        Merges.push_back(prior(Loc));
        for (unsigned j = SIndex; j < i; ++j) {
          MBB.erase(MemOps[j].MBBI);
          MemOps[j].Merged = true;
        }
      }
      // Recurse to handle the remainder of the queue starting at entry i.
      SmallVector<MachineBasicBlock::iterator, 4> Merges2 =
        MergeLDR_STR(MBB, i, Base, Opcode, Size, Scratch, MemOps);
      Merges.append(Merges2.begin(), Merges2.end());
      return Merges;
    }

    // Track the latest (by position) instruction in the run; the merged op
    // will be inserted after it.
    if (MemOps[i].Position > Pos) {
      Pos = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
    }
  }

  // The entire remaining queue forms a single run; merge it. The base is
  // killed if its last use (with kill flag) is the latest instruction.
  bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1;
  if (mergeOps(MBB, ++Loc, SOffset, Base, BaseKill, Opcode,Scratch,Regs, TII)) {
    Merges.push_back(prior(Loc));
    for (unsigned i = SIndex, e = MemOps.size(); i != e; ++i) {
      MBB.erase(MemOps[i].MBBI);
      MemOps[i].Merged = true;
    }
  }

  return Merges;
}
239
240static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
241                                       unsigned Bytes) {
242  return (MI && MI->getOpcode() == ARM::SUBri &&
243          MI->getOperand(0).getReg() == Base &&
244          MI->getOperand(1).getReg() == Base &&
245          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes);
246}
247
248static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
249                                       unsigned Bytes) {
250  return (MI && MI->getOpcode() == ARM::ADDri &&
251          MI->getOperand(0).getReg() == Base &&
252          MI->getOperand(1).getReg() == Base &&
253          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes);
254}
255
256static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
257  switch (MI->getOpcode()) {
258  default: return 0;
259  case ARM::LDR:
260  case ARM::STR:
261  case ARM::FLDS:
262  case ARM::FSTS:
263    return 4;
264  case ARM::FLDD:
265  case ARM::FSTD:
266    return 8;
267  case ARM::LDM:
268  case ARM::STM:
269    return (MI->getNumOperands() - 2) * 4;
270  case ARM::FLDMS:
271  case ARM::FSTMS:
272  case ARM::FLDMD:
273  case ARM::FSTMD:
274    return ARM_AM::getAM5Offset(MI->getOperand(1).getImm()) * 4;
275  }
276}
277
278/// mergeBaseUpdateLSMultiple - Fold proceeding/trailing inc/dec of base
279/// register into the LDM/STM/FLDM{D|S}/FSTM{D|S} op when possible:
280///
281/// stmia rn, <ra, rb, rc>
282/// rn := rn + 4 * 3;
283/// =>
284/// stmia rn!, <ra, rb, rc>
285///
286/// rn := rn - 4 * 3;
287/// ldmia rn, <ra, rb, rc>
288/// =>
289/// ldmdb rn!, <ra, rb, rc>
290static bool mergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
291                                      MachineBasicBlock::iterator MBBI) {
292  MachineInstr *MI = MBBI;
293  unsigned Base = MI->getOperand(0).getReg();
294  unsigned Bytes = getLSMultipleTransferSize(MI);
295  int Opcode = MI->getOpcode();
296  bool isAM4 = Opcode == ARM::LDM || Opcode == ARM::STM;
297
298  if (isAM4) {
299    if (ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()))
300      return false;
301
302    // Can't use the updating AM4 sub-mode if the base register is also a dest
303    // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
304    for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i) {
305      if (MI->getOperand(i).getReg() == Base)
306        return false;
307    }
308
309    ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
310    if (MBBI != MBB.begin()) {
311      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
312      if (Mode == ARM_AM::ia &&
313          isMatchingDecrement(PrevMBBI, Base, Bytes)) {
314        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
315        MBB.erase(PrevMBBI);
316        return true;
317      } else if (Mode == ARM_AM::ib &&
318                 isMatchingDecrement(PrevMBBI, Base, Bytes)) {
319        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));
320        MBB.erase(PrevMBBI);
321        return true;
322      }
323    }
324
325    if (MBBI != MBB.end()) {
326      MachineBasicBlock::iterator NextMBBI = next(MBBI);
327      if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
328          isMatchingIncrement(NextMBBI, Base, Bytes)) {
329        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
330        MBB.erase(NextMBBI);
331        return true;
332      } else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
333                 isMatchingDecrement(NextMBBI, Base, Bytes)) {
334        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
335        MBB.erase(NextMBBI);
336        return true;
337      }
338    }
339  } else {
340    // FLDM{D|S}, FSTM{D|S} addressing mode 5 ops.
341    if (ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()))
342      return false;
343
344    ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
345    unsigned Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
346    if (MBBI != MBB.begin()) {
347      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
348      if (Mode == ARM_AM::ia &&
349          isMatchingDecrement(PrevMBBI, Base, Bytes)) {
350        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));
351        MBB.erase(PrevMBBI);
352        return true;
353      }
354    }
355
356    if (MBBI != MBB.end()) {
357      MachineBasicBlock::iterator NextMBBI = next(MBBI);
358      if (Mode == ARM_AM::ia &&
359          isMatchingIncrement(NextMBBI, Base, Bytes)) {
360        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
361        MBB.erase(NextMBBI);
362      }
363      return true;
364    }
365  }
366
367  return false;
368}
369
370static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
371  switch (Opc) {
372  case ARM::LDR: return ARM::LDR_PRE;
373  case ARM::STR: return ARM::STR_PRE;
374  case ARM::FLDS: return ARM::FLDMS;
375  case ARM::FLDD: return ARM::FLDMD;
376  case ARM::FSTS: return ARM::FSTMS;
377  case ARM::FSTD: return ARM::FSTMD;
378  default: abort();
379  }
380  return 0;
381}
382
383static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
384  switch (Opc) {
385  case ARM::LDR: return ARM::LDR_POST;
386  case ARM::STR: return ARM::STR_POST;
387  case ARM::FLDS: return ARM::FLDMS;
388  case ARM::FLDD: return ARM::FLDMD;
389  case ARM::FSTS: return ARM::FSTMS;
390  case ARM::FSTD: return ARM::FSTMD;
391  default: abort();
392  }
393  return 0;
394}
395
/// mergeBaseUpdateLoadStore - Fold proceeding/trailing inc/dec of base
/// register into the LDR/STR/FLD{D|S}/FST{D|S} op when possible:
/// a preceding add / sub becomes a pre-indexed op, a trailing one becomes a
/// post-indexed op. Returns true if the fold was performed.
static bool mergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const TargetInstrInfo *TII) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(1).getReg();
  bool BaseKill = MI->getOperand(1).isKill();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  int Opcode = MI->getOpcode();
  bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
  // Only zero-offset loads / stores can absorb a base update.
  if ((isAM2 && ARM_AM::getAM2Offset(MI->getOperand(3).getImm()) != 0) ||
      (!isAM2 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0))
    return false;

  bool isLd = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  // Can't do the merge if the destination register is the same as the would-be
  // writeback register.
  if (isLd && MI->getOperand(0).getReg() == Base)
    return false;

  bool DoMerge = false;
  ARM_AM::AddrOpc AddSub = ARM_AM::add;
  unsigned NewOpc = 0;
  // Look for a preceding add / sub of the base: fold as a pre-indexed op.
  // Non-AM2 (FP) ops only support the decrement (db) direction here.
  if (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    if (isMatchingDecrement(PrevMBBI, Base, Bytes)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
    } else if (isAM2 && isMatchingIncrement(PrevMBBI, Base, Bytes)) {
      DoMerge = true;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
    }
    if (DoMerge)
      MBB.erase(PrevMBBI);
  }

  // Otherwise look for a trailing add / sub: fold as a post-indexed op.
  if (!DoMerge && MBBI != MBB.end()) {
    MachineBasicBlock::iterator NextMBBI = next(MBBI);
    if (isAM2 && isMatchingDecrement(NextMBBI, Base, Bytes)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    } else if (isMatchingIncrement(NextMBBI, Base, Bytes)) {
      DoMerge = true;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    }
    if (DoMerge)
      MBB.erase(NextMBBI);
  }

  if (!DoMerge)
    return false;

  // For FP ops, NewOpc is a writeback FLDM / FSTM transferring one register
  // (two words for a DPR); for LDR / STR it is a true pre / post-indexed op.
  bool isDPR = NewOpc == ARM::FLDMD || NewOpc == ARM::FSTMD;
  unsigned Offset = isAM2 ? ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift)
    : ARM_AM::getAM5Opc((AddSub == ARM_AM::sub) ? ARM_AM::db : ARM_AM::ia,
                        true, isDPR ? 2 : 1);
  if (isLd) {
    if (isAM2)
      // LDR_PRE, LDR_POST;
      BuildMI(MBB, MBBI, TII->get(NewOpc), MI->getOperand(0).getReg())
        .addReg(Base, true)
        .addReg(Base).addReg(0).addImm(Offset);
    else
      BuildMI(MBB, MBBI, TII->get(NewOpc)).addReg(Base, false, false, BaseKill)
        .addImm(Offset).addReg(MI->getOperand(0).getReg(), true);
  } else {
    MachineOperand &MO = MI->getOperand(0);
    if (isAM2)
      // STR_PRE, STR_POST;
      BuildMI(MBB, MBBI, TII->get(NewOpc), Base)
        .addReg(MO.getReg(), false, false, MO.isKill())
        .addReg(Base).addReg(0).addImm(Offset);
    else
      BuildMI(MBB, MBBI, TII->get(NewOpc)).addReg(Base)
        .addImm(Offset).addReg(MO.getReg(), false, false, MO.isKill());
  }
  // The original load / store is replaced by the indexed form just built.
  MBB.erase(MBBI);

  return true;
}
479
480/// isMemoryOp - Returns true if instruction is a memory operations (that this
481/// pass is capable of operating on).
482static bool isMemoryOp(MachineInstr *MI) {
483  int Opcode = MI->getOpcode();
484  switch (Opcode) {
485  default: break;
486  case ARM::LDR:
487  case ARM::STR:
488    return MI->getOperand(1).isRegister() && MI->getOperand(2).getReg() == 0;
489  case ARM::FLDS:
490  case ARM::FSTS:
491    return MI->getOperand(1).isRegister();
492  case ARM::FLDD:
493  case ARM::FSTD:
494    return MI->getOperand(1).isRegister();
495  }
496  return false;
497}
498
499/// AdvanceRS - Advance register scavenger to just before the earliest memory
500/// op that is being merged.
501void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
502  MachineBasicBlock::iterator Loc = MemOps[0].MBBI;
503  unsigned Position = MemOps[0].Position;
504  for (unsigned i = 1, e = MemOps.size(); i != e; ++i) {
505    if (MemOps[i].Position < Position) {
506      Position = MemOps[i].Position;
507      Loc = MemOps[i].MBBI;
508    }
509  }
510
511  if (Loc != MBB.begin())
512    RS->forward(prior(Loc));
513}
514
/// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
/// ops of the same base and incrementing offset into LDM / STM ops. Scans
/// the block once, collecting chains of mergeable ops in MemOps, and flushes
/// a chain (TryMerge) whenever it is broken.
bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
  unsigned NumMerges = 0;
  unsigned NumMemOps = 0;
  MemOpQueue MemOps;       // Current chain, kept sorted by offset.
  unsigned CurrBase = 0;   // Base register of the chain (0 = no chain).
  int CurrOpc = -1;
  unsigned CurrSize = 0;
  unsigned Position = 0;   // Instruction index within the block.

  RS->enterBasicBlock(&MBB);
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    bool Advance  = false;
    bool TryMerge = false;
    bool Clobber  = false;

    bool isMemOp = isMemoryOp(MBBI);
    if (isMemOp) {
      int Opcode = MBBI->getOpcode();
      bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
      unsigned Size = getLSMultipleTransferSize(MBBI);
      unsigned Base = MBBI->getOperand(1).getReg();
      unsigned OffIdx = MBBI->getNumOperands()-1;
      unsigned OffField = MBBI->getOperand(OffIdx).getImm();
      // Decode the signed byte offset from the addressing-mode immediate.
      int Offset = isAM2
        ? ARM_AM::getAM2Offset(OffField) : ARM_AM::getAM5Offset(OffField) * 4;
      if (isAM2) {
        if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
          Offset = -Offset;
      } else {
        if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
          Offset = -Offset;
      }
      // Watch out for:
      // r4 := ldr [r5]
      // r5 := ldr [r5, #4]
      // r6 := ldr [r5, #8]
      //
      // The second ldr has effectively broken the chain even though it
      // looks like the later ldr(s) use the same base register. Try to
      // merge the ldr's so far, including this one. But don't try to
      // combine the following ldr(s).
      Clobber = (Opcode == ARM::LDR && Base == MBBI->getOperand(0).getReg());
      if (CurrBase == 0 && !Clobber) {
        // Start of a new chain.
        CurrBase = Base;
        CurrOpc  = Opcode;
        CurrSize = Size;
        MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
        NumMemOps++;
        Advance = true;
      } else {
        if (Clobber) {
          TryMerge = true;
          Advance = true;
        }

        if (CurrOpc == Opcode && CurrBase == Base) {
          // Continue adding to the queue.
          if (Offset > MemOps.back().Offset) {
            MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
            NumMemOps++;
            Advance = true;
          } else {
            // Insert in offset order; a duplicate offset means the two ops
            // alias and this one cannot join the chain.
            for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
                 I != E; ++I) {
              if (Offset < I->Offset) {
                MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
                NumMemOps++;
                Advance = true;
                break;
              } else if (Offset == I->Offset) {
                // Collision! This can't be merged!
                break;
              }
            }
          }
        }
      }
    }

    if (Advance) {
      ++Position;
      ++MBBI;
    } else
      TryMerge = true;

    if (TryMerge) {
      if (NumMemOps > 1) {
        // Try to find a free register to use as a new base in case it's needed.
        // First advance to the instruction just before the start of the chain.
        AdvanceRS(MBB, MemOps);
        // Find a scratch register. Make sure it's a call clobbered register or
        // a spilled callee-saved register.
        unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass, true);
        if (!Scratch)
          Scratch = RS->FindUnusedReg(&ARM::GPRRegClass,
                                      AFI->getSpilledCSRegisters());
        // Process the load / store instructions.
        RS->forward(prior(MBBI));

        // Merge ops.
        SmallVector<MachineBasicBlock::iterator,4> MBBII =
          MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize, Scratch, MemOps);

        // Try folding preceding/trailing base inc/dec into the generated
        // LDM/STM ops.
        for (unsigned i = 0, e = MBBII.size(); i < e; ++i)
          if (mergeBaseUpdateLSMultiple(MBB, MBBII[i]))
            NumMerges++;
        NumMerges += MBBII.size();

        // Try folding preceding/trailing base inc/dec into those load/store
        // that were not merged to form LDM/STM ops.
        for (unsigned i = 0; i != NumMemOps; ++i)
          if (!MemOps[i].Merged)
            if (mergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII))
              NumMerges++;

        // RS may be pointing to an instruction that's deleted.
        RS->skipTo(prior(MBBI));
      }

      // Reset the chain state for the next run.
      CurrBase = 0;
      CurrOpc = -1;
      if (NumMemOps) {
        MemOps.clear();
        NumMemOps = 0;
      }

      // If iterator hasn't been advanced and this is not a memory op, skip it.
      // It can't start a new chain anyway.
      if (!Advance && !isMemOp && MBBI != E) {
        ++Position;
        ++MBBI;
      }
    }
  }
  return NumMerges > 0;
}
657
658/// MergeReturnIntoLDM - If this is a exit BB, try merging the return op
659/// (bx lr) into the preceeding stack restore so it directly restore the value
660/// of LR into pc.
661///   ldmfd sp!, {r7, lr}
662///   bx lr
663/// =>
664///   ldmfd sp!, {r7, pc}
665bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
666  if (MBB.empty()) return false;
667
668  MachineBasicBlock::iterator MBBI = prior(MBB.end());
669  if (MBBI->getOpcode() == ARM::BX_RET && MBBI != MBB.begin()) {
670    MachineInstr *PrevMI = prior(MBBI);
671    if (PrevMI->getOpcode() == ARM::LDM) {
672      MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
673      if (MO.getReg() == ARM::LR) {
674        PrevMI->setInstrDescriptor(TII->get(ARM::LDM_RET));
675        MO.setReg(ARM::PC);
676        MBB.erase(MBBI);
677        return true;
678      }
679    }
680  }
681  return false;
682}
683
684bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
685  const TargetMachine &TM = Fn.getTarget();
686  AFI = Fn.getInfo<ARMFunctionInfo>();
687  TII = TM.getInstrInfo();
688  MRI = TM.getRegisterInfo();
689  RS = new RegScavenger();
690
691  bool Modified = false;
692  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
693       ++MFI) {
694    MachineBasicBlock &MBB = *MFI;
695    Modified |= LoadStoreMultipleOpti(MBB);
696    Modified |= MergeReturnIntoLDM(MBB);
697  }
698
699  delete RS;
700  return Modified;
701}
702