// Thumb2SizeReduction.cpp revision 3a21425dbe09c7ac85e6b156f82184dd6132435a
1//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#define DEBUG_TYPE "t2-reduce-size"
11#include "ARM.h"
12#include "ARMBaseRegisterInfo.h"
13#include "ARMBaseInstrInfo.h"
14#include "Thumb2InstrInfo.h"
15#include "llvm/CodeGen/MachineInstr.h"
16#include "llvm/CodeGen/MachineInstrBuilder.h"
17#include "llvm/CodeGen/MachineFunctionPass.h"
18#include "llvm/Support/CommandLine.h"
19#include "llvm/Support/Compiler.h"
20#include "llvm/Support/Debug.h"
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/ADT/Statistic.h"
23using namespace llvm;
24
// Pass statistics, printed with -stats.
STATISTIC(NumNarrows,  "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,   "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,    "Number of 32-bit load / store reduced to 16-bit ones");

// Debugging aid: cap the total number of reductions performed (-1 = no limit),
// useful for bisecting miscompiles introduced by this pass.
static cl::opt<int> ReduceLimit("t2-reduce-limit", cl::init(-1), cl::Hidden);
30
31namespace {
  /// ReduceEntry - One row of the static table mapping a wide (32-bit)
  /// Thumb2 opcode to the narrow (16-bit) opcode(s) it may be reduced to,
  /// together with the constraints under which the reduction is legal.
  struct ReduceEntry {
    unsigned WideOpc;      // Wide opcode
    unsigned NarrowOpc1;   // Narrow opcode to transform to
    unsigned NarrowOpc2;   // Narrow opcode when it's two-address
    uint8_t  Imm1Limit;    // Limit of immediate field (bits)
    uint8_t  Imm2Limit;    // Limit of immediate field when it's two-address
    unsigned LowRegs1 : 1; // Only possible if low-registers are used
    unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
    unsigned PredCC1  : 1; // 0 - If predicated, cc is on and vice versa.
                           // 1 - No cc field.
    unsigned PredCC2  : 1;
    unsigned Special  : 1; // Needs to be dealt with specially
  };
47
48  static const ReduceEntry ReduceTable[] = {
49    // Wide,        Narrow1,      Narrow2,     imm1,imm2,  lo1, lo2, P/C, S
50    { ARM::t2ADCrr, ARM::tADC,    0,             0,   0,    1,   0,  0,0, 0 },
51    // FIXME: t2ADDS variants.
52    { ARM::t2ADDri, ARM::tADDi3,  ARM::tADDi8,   3,   8,    1,   1,  0,0, 0 },
53    { ARM::t2ADDrr, ARM::tADDrr,  ARM::tADDhirr, 0,   0,    1,   0,  0,1, 0 },
54    { ARM::t2ANDrr, 0,            ARM::tAND,     0,   0,    0,   1,  0,0, 0 },
55    { ARM::t2ASRri, ARM::tASRri,  0,             5,   0,    1,   0,  0,0, 0 },
56    { ARM::t2ASRrr, 0,            ARM::tASRrr,   0,   0,    0,   1,  0,0, 0 },
57    { ARM::t2BICrr, 0,            ARM::tBIC,     0,   0,    0,   1,  0,0, 0 },
58    { ARM::t2CMNrr, ARM::tCMN,    0,             0,   0,    1,   0,  1,0, 0 },
59    { ARM::t2CMPri, ARM::tCMPi8,  0,             8,   0,    1,   0,  1,0, 0 },
60    { ARM::t2CMPrr, ARM::tCMPhir, 0,             0,   0,    0,   0,  1,0, 0 },
61    { ARM::t2CMPzri,ARM::tCMPzi8, 0,             8,   0,    1,   0,  1,0, 0 },
62    { ARM::t2CMPzrr,ARM::tCMPzhir,0,             0,   0,    0,   0,  1,0, 0 },
63    { ARM::t2EORrr, 0,            ARM::tEOR,     0,   0,    0,   1,  0,0, 0 },
64    { ARM::t2LSLri, ARM::tLSLri,  0,             5,   0,    1,   0,  0,0, 0 },
65    { ARM::t2LSLrr, 0,            ARM::tLSLrr,   0,   0,    0,   1,  0,0, 0 },
66    { ARM::t2LSRri, ARM::tLSRri,  0,             5,   0,    1,   0,  0,0, 0 },
67    { ARM::t2LSRrr, 0,            ARM::tLSRrr,   0,   0,    0,   1,  0,0, 0 },
68    { ARM::t2MOVi,  ARM::tMOVi8,  0,             8,   0,    1,   0,  0,0, 0 },
69    // FIXME: Do we need the 16-bit 'S' variant?
70    // FIXME: t2MOVcc
71    { ARM::t2MOVr,ARM::tMOVgpr2gpr,0,            0,   0,    0,   0,  1,0, 0 },
72    { ARM::t2MUL,   0,            ARM::tMUL,     0,   0,    0,   1,  0,0, 0 },
73    { ARM::t2MVNr,  ARM::tMVN,    0,             0,   0,    1,   0,  0,0, 0 },
74    { ARM::t2ORRrr, 0,            ARM::tORR,     0,   0,    0,   1,  0,0, 0 },
75    { ARM::t2REV,   ARM::tREV,    0,             0,   0,    1,   0,  1,0, 0 },
76    { ARM::t2REV16, ARM::tREV16,  0,             0,   0,    1,   0,  1,0, 0 },
77    { ARM::t2REVSH, ARM::tREVSH,  0,             0,   0,    1,   0,  1,0, 0 },
78    { ARM::t2RORrr, 0,            ARM::tROR,     0,   0,    0,   1,  0,0, 0 },
79    // FIXME: T2RSBri immediate must be zero. Also need entry for T2RSBS
80    //{ ARM::t2RSBri, ARM::tRSB,    0,             0,   0,    1,   0,  0,0, 0 },
81    { ARM::t2SUBri, ARM::tSUBi3,  ARM::tSUBi8,   3,   8,    1,   1,  0,0, 0 },
82    { ARM::t2SUBrr, ARM::tSUBrr,  0,             0,   0,    1,   0,  0,0, 0 },
83    { ARM::t2SXTBr, ARM::tSXTB,   0,             0,   0,    1,   0,  1,0, 0 },
84    { ARM::t2SXTHr, ARM::tSXTH,   0,             0,   0,    1,   0,  1,0, 0 },
85    { ARM::t2TSTrr, ARM::tTST,    0,             0,   0,    1,   0,  1,0, 0 },
86    { ARM::t2UXTBr, ARM::tUXTB,   0,             0,   0,    1,   0,  1,0, 0 },
87    { ARM::t2UXTHr, ARM::tUXTH,   0,             0,   0,    1,   0,  1,0, 0 },
88
89    // FIXME: Clean this up after splitting each Thumb load / store opcode
90    // into multiple ones.
91    { ARM::t2LDRi12,ARM::tLDR,    0,             5,   0,    1,   0,  0,0, 1 },
92    { ARM::t2LDRs,  ARM::tLDR,    0,             0,   0,    1,   0,  0,0, 1 },
93    { ARM::t2LDRBi12,ARM::tLDRB,  0,             5,   0,    1,   0,  0,0, 1 },
94    { ARM::t2LDRBs, ARM::tLDRB,   0,             0,   0,    1,   0,  0,0, 1 },
95    { ARM::t2LDRHi12,ARM::tLDRH,  0,             5,   0,    1,   0,  0,0, 1 },
96    { ARM::t2LDRHs, ARM::tLDRH,   0,             0,   0,    1,   0,  0,0, 1 },
97    { ARM::t2LDRSBs,ARM::tLDR,    0,             0,   0,    1,   0,  0,0, 1 },
98    { ARM::t2LDRSHs,ARM::tLDRSH,  0,             0,   0,    1,   0,  0,0, 1 },
99    { ARM::t2STRi12,ARM::tSTR,    0,             5,   0,    1,   0,  0,0, 1 },
100    { ARM::t2STRs,  ARM::tSTR,    0,             0,   0,    1,   0,  0,0, 1 },
101    { ARM::t2STRBi12,ARM::tSTRB,  0,             5,   0,    1,   0,  0,0, 1 },
102    { ARM::t2STRBs, ARM::tSTRB,   0,             0,   0,    1,   0,  0,0, 1 },
103    { ARM::t2STRHi12,ARM::tSTRH,  0,             5,   0,    1,   0,  0,0, 1 },
104    { ARM::t2STRHs, ARM::tSTRH,   0,             0,   0,    1,   0,  0,0, 1 }
105  };
106
  /// Thumb2SizeReduce - Machine function pass that rewrites 32-bit Thumb2
  /// instructions into equivalent 16-bit ones when the operands, immediates
  /// and CPSR liveness permit, shrinking code size.
  class VISIBILITY_HIDDEN Thumb2SizeReduce : public MachineFunctionPass {
  public:
    static char ID;
    Thumb2SizeReduce();

    const TargetInstrInfo *TII;

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual const char *getPassName() const {
      return "Thumb2 instruction size reduction pass";
    }

  private:
    /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
    DenseMap<unsigned, unsigned> ReduceOpcodeMap;

    /// ReduceLoadStore - Reduce a 32-bit load / store to a 16-bit one.
    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);

    /// ReduceSpecial - Handle table entries marked Special (currently the
    /// load / store opcodes).
    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR);

    /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
    /// instruction.
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry,
                       bool LiveCPSR);

    /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
    /// non-two-address instruction.
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry,
                        bool LiveCPSR);

    /// ReduceMBB - Reduce width of instructions in the specified basic block.
    bool ReduceMBB(MachineBasicBlock &MBB);
  };
  char Thumb2SizeReduce::ID = 0;
146}
147
148Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(&ID) {
149  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
150    unsigned FromOpc = ReduceTable[i].WideOpc;
151    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
152      assert(false && "Duplicated entries?");
153  }
154}
155
156static bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
157                            bool is2Addr, ARMCC::CondCodes Pred,
158                            bool LiveCPSR, bool &HasCC, bool &CCDead) {
159  if ((is2Addr  && Entry.PredCC2 == 0) ||
160      (!is2Addr && Entry.PredCC1 == 0)) {
161    if (Pred == ARMCC::AL) {
162      // Not predicated, must set CPSR.
163      if (!HasCC) {
164        // Original instruction was not setting CPSR, but CPSR is not
165        // currently live anyway. It's ok to set it. The CPSR def is
166        // dead though.
167        if (!LiveCPSR) {
168          HasCC = true;
169          CCDead = true;
170          return true;
171        }
172        return false;
173      }
174    } else {
175      // Predicated, must not set CPSR.
176      if (HasCC)
177        return false;
178    }
179  } else {
180    // 16-bit instruction does not set CPSR.
181    if (HasCC)
182      return false;
183  }
184
185  return true;
186}
187
188bool
189Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
190                                  const ReduceEntry &Entry) {
191  unsigned Scale = 1;
192  bool HasImmOffset = false;
193  bool HasShift = false;
194  switch (Entry.WideOpc) {
195  default:
196    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
197  case ARM::t2LDRi12:
198  case ARM::t2STRi12:
199    Scale = 4;
200    HasImmOffset = true;
201    break;
202  case ARM::t2LDRBi12:
203  case ARM::t2STRBi12:
204    HasImmOffset = true;
205    break;
206  case ARM::t2LDRHi12:
207  case ARM::t2STRHi12:
208    Scale = 2;
209    HasImmOffset = true;
210    break;
211  case ARM::t2LDRs:
212  case ARM::t2LDRBs:
213  case ARM::t2LDRHs:
214  case ARM::t2LDRSBs:
215  case ARM::t2LDRSHs:
216  case ARM::t2STRs:
217  case ARM::t2STRBs:
218  case ARM::t2STRHs:
219    HasShift = true;
220    break;
221  }
222
223  unsigned OffsetReg = 0;
224  bool OffsetKill = false;
225  if (HasShift) {
226    OffsetReg  = MI->getOperand(2).getReg();
227    OffsetKill = MI->getOperand(2).isKill();
228    if (MI->getOperand(3).getImm())
229      // Thumb1 addressing mode doesn't support shift.
230      return false;
231  }
232
233  unsigned OffsetImm = 0;
234  if (HasImmOffset) {
235    OffsetImm = MI->getOperand(2).getImm();
236    unsigned MaxOffset = ((1 << Entry.Imm1Limit) - 1) * Scale;
237    if ((OffsetImm & (Scale-1)) || OffsetImm > MaxOffset)
238      // Make sure the immediate field fits.
239      return false;
240  }
241
242  // Add the 16-bit load / store instruction.
243  // FIXME: Thumb1 addressing mode encode both immediate and register offset.
244  DebugLoc dl = MI->getDebugLoc();
245  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, TII->get(Entry.NarrowOpc1))
246    .addOperand(MI->getOperand(0))
247    .addOperand(MI->getOperand(1));
248  if (Entry.NarrowOpc1 != ARM::tLDRSB && Entry.NarrowOpc1 != ARM::tLDRSH) {
249    // tLDRSB and tLDRSH do not have an immediate offset field. On the other
250    // hand, it must have an offset register.
251    assert(OffsetReg && "Invalid so_reg load / store address!");
252    // FIXME: Remove this special case.
253    MIB.addImm(OffsetImm/Scale);
254  }
255  MIB.addReg(OffsetReg, getKillRegState(OffsetKill));
256
257  // Transfer the rest of operands.
258  unsigned OpNum = HasShift ? 4 : 3;
259  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
260    MIB.addOperand(MI->getOperand(OpNum));
261
262  DOUT << "Converted 32-bit: " << *MI << "       to 16-bit: " << *MIB;
263
264  MBB.erase(MI);
265  ++NumLdSts;
266  return true;
267}
268
269static bool VerifyLowRegs(MachineInstr *MI, const TargetInstrDesc &TID) {
270  for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
271    const MachineOperand &MO = MI->getOperand(i);
272    if (!MO.isReg())
273      continue;
274    unsigned Reg = MO.getReg();
275    if (Reg == 0 || Reg == ARM::CPSR)
276      continue;
277    if (!isARMLowRegister(Reg))
278      return false;
279  }
280  return true;
281}
282
283bool
284Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
285                                const ReduceEntry &Entry,
286                                bool LiveCPSR) {
287  const TargetInstrDesc &TID = MI->getDesc();
288  if (Entry.LowRegs1 && !VerifyLowRegs(MI, TID))
289    return false;
290
291  if (TID.mayLoad() || TID.mayStore())
292    return ReduceLoadStore(MBB, MI, Entry);
293  return false;
294}
295
bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  // Two-address 16-bit forms require the destination to also be the first
  // source register.
  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  if (Reg0 != Reg1)
    return false;
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    // Immediate form: the value must fit in Imm2Limit bits.
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    // Register form: the second source must also satisfy the low-register
    // constraint when required.
    unsigned Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }

  // Check if it's possible / necessary to transfer the predicate.
  const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc2);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewTID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    // Unpredicated: drop the predicate operands if the narrow form cannot
    // carry them.
    SkipPred = !NewTID.isPredicable();
  }

  // Determine whether the original instruction sets CPSR (optional def is
  // the last descriptor operand) and whether that def is dead.
  bool HasCC = false;
  bool CCDead = false;
  if (TID.hasOptionalDef()) {
    unsigned NumOps = TID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, TII->get(Entry.NarrowOpc2));
  MIB.addOperand(MI->getOperand(0));
  if (HasCC)
    AddDefaultT1CC(MIB, CCDead);

  // Transfer the rest of operands, skipping the optional CPSR def (already
  // handled above) and, when SkipPred, the predicate operands.
  unsigned NumOps = TID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && TID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && TID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }

  DOUT << "Converted 32-bit: " << *MI << "       to 16-bit: " << *MIB;

  MBB.erase(MI);
  ++Num2Addrs;
  return true;
}
365
bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR) {
  // Maximum value representable in the narrow immediate field (all-ones if
  // the entry has no immediate).
  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;

  // Verify every (non-predicate) operand: registers must be low registers
  // when the table requires it, and immediates must fit the narrow field.
  const TargetInstrDesc &TID = MI->getDesc();
  for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
    if (TID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm()) {
      if (MO.getImm() > Limit)
        return false;
    }
  }

  // Check if it's possible / necessary to transfer the predicate.
  const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc1);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewTID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    // Unpredicated: drop the predicate operands if the narrow form cannot
    // carry them.
    SkipPred = !NewTID.isPredicable();
  }

  // Determine whether the original instruction sets CPSR (optional def is
  // the last descriptor operand) and whether that def is dead.
  bool HasCC = false;
  bool CCDead = false;
  if (TID.hasOptionalDef()) {
    unsigned NumOps = TID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, TII->get(Entry.NarrowOpc1));
  MIB.addOperand(MI->getOperand(0));
  if (HasCC)
    AddDefaultT1CC(MIB, CCDead);

  // Transfer the rest of operands, skipping the optional CPSR def (already
  // handled above) and, when SkipPred, the predicate operands.
  unsigned NumOps = TID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && TID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && TID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }


  DOUT << "Converted 32-bit: " << *MI << "       to 16-bit: " << *MIB;

  MBB.erase(MI);
  ++NumNarrows;
  return true;
}
439
440static bool UpdateCPSRLiveness(MachineInstr &MI, bool LiveCPSR) {
441  bool HasDef = false;
442  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
443    const MachineOperand &MO = MI.getOperand(i);
444    if (!MO.isReg() || MO.isUndef())
445      continue;
446    if (MO.getReg() != ARM::CPSR)
447      continue;
448    if (MO.isDef()) {
449      if (!MO.isDead())
450        HasDef = true;
451      continue;
452    }
453
454    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
455    if (MO.isKill()) {
456      LiveCPSR = false;
457      break;
458    }
459  }
460
461  return HasDef || LiveCPSR;
462}
463
bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  bool LiveCPSR = false;
  // Yes, CPSR could be livein.
  for (MachineBasicBlock::const_livein_iterator I = MBB.livein_begin(),
         E = MBB.livein_end(); I != E; ++I) {
    if (*I == ARM::CPSR) {
      LiveCPSR = true;
      break;
    }
  }

  // Walk the block; NextMII is captured before any reduction so iteration
  // survives MI being erased and replaced.
  MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
  MachineBasicBlock::iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = next(MII);

    MachineInstr *MI = &*MII;
    unsigned Opcode = MI->getOpcode();
    DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
    if (OPI != ReduceOpcodeMap.end()) {
      const ReduceEntry &Entry = ReduceTable[OPI->second];
      // Ignore "special" cases for now.
      if (Entry.Special) {
        if (ReduceSpecial(MBB, MI, Entry, LiveCPSR)) {
          Modified = true;
          // The reduction inserted a new instruction just before NextMII;
          // re-point MI at it for the CPSR liveness update below.
          MachineBasicBlock::iterator I = prior(NextMII);
          MI = &*I;
        }
        goto ProcessNext;
      }

      // Try to transform to a 16-bit two-address instruction.
      if (Entry.NarrowOpc2 && ReduceTo2Addr(MBB, MI, Entry, LiveCPSR)) {
        Modified = true;
        // Re-point MI at the newly inserted narrow instruction.
        MachineBasicBlock::iterator I = prior(NextMII);
        MI = &*I;
        goto ProcessNext;
      }

      // Try to transform ro a 16-bit non-two-address instruction.
      if (Entry.NarrowOpc1 && ReduceToNarrow(MBB, MI, Entry, LiveCPSR))
        Modified = true;
    }

  ProcessNext:
    // Track CPSR liveness across the (possibly replaced) instruction.
    LiveCPSR = UpdateCPSRLiveness(*MI, LiveCPSR);

    // Honor the -t2-reduce-limit debugging cap.
    if (ReduceLimit != -1 && ((int)(NumNarrows + Num2Addrs) > ReduceLimit))
      break;
  }

  return Modified;
}
519
520bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
521  const TargetMachine &TM = MF.getTarget();
522  TII = TM.getInstrInfo();
523
524  bool Modified = false;
525  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
526    Modified |= ReduceMBB(*I);
527  return Modified;
528}
529
530/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
531/// reduction pass.
532FunctionPass *llvm::createThumb2SizeReductionPass() {
533  return new Thumb2SizeReduce();
534}
535