ARMConstantIslandPass.cpp revision a26811ec83d00344a739d84f4b8584e5548b94ce
//===-- ARMConstantIslandPass.cpp - ARM constant islands ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that splits the constant pool up into 'islands'
// which are scattered throughout the function.  This is required due to the
// limited pc-relative displacements that ARM has.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-cp-islands"
#include "ARM.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMInstrInfo.h"
#include "Thumb2InstrInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumCPEs,       "Number of constpool entries");
STATISTIC(NumSplit,      "Number of uncond branches inserted");
STATISTIC(NumCBrFixed,   "Number of cond branches fixed");
STATISTIC(NumUBrFixed,   "Number of uncond branches fixed");
STATISTIC(NumTBs,        "Number of table branches generated");
STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
STATISTIC(NumCBZ,        "Number of CBZ / CBNZ formed");
STATISTIC(NumJTMoved,    "Number of jump table destination blocks moved");
STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");


static cl::opt<bool>
AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
          cl::desc("Adjust basic block layout to better use TB[BH]"));

namespace {
  /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
  /// requires constant pool entries to be scattered among the instructions
  /// inside a function.  To do this, it completely ignores the normal LLVM
  /// constant pool; instead, it places constants wherever it feels like with
  /// special instructions.
  ///
  /// The terminology used in this pass includes:
  ///   Islands - Clumps of constants placed in the function.
  ///   Water   - Potential places where an island could be formed.
  ///   CPE     - A constant pool entry that has been placed somewhere, which
  ///             tracks a list of users.
  class ARMConstantIslands : public MachineFunctionPass {
    /// BasicBlockInfo - Information about the offset and size of a single
    /// basic block.
    struct BasicBlockInfo {
      /// Offset - Distance from the beginning of the function to the beginning
      /// of this basic block.
      ///
      /// The two-byte pads required for Thumb alignment are counted as part of
      /// the following block.
      unsigned Offset;
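
      // Illustrative note (added commentary, not from the original source):
      // the "pads counted as part of the following block" convention means a
      // padded island block has both Offset and Size ==2 mod 4, so its
      // postOffset() is 4-byte aligned again.  A hypothetical example:
      //
      //   BasicBlockInfo BBI;          // hypothetical island block
      //   BBI.Offset = 6;              // ==2 mod 4: needs a 2-byte pad
      //   BBI.Size   = 2 /*pad*/ + 4;  // pad plus one 4-byte entry
      //   assert(BBI.postOffset() % 4 == 0);
      //
      // This is the invariant that verify() checks later in this file.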

      /// Size - Size of the basic block in bytes.  If the block contains
      /// inline assembly, this is a worst case estimate.
      ///
      /// The two-byte pads required for Thumb alignment are counted as part of
      /// the following block (i.e., the offset and size for a padded block
      /// will both be ==2 mod 4).
      unsigned Size;

      /// Unalign - When non-zero, the block contains instructions (inline asm)
      /// of unknown size.  The real size may be smaller than Size bytes by a
      /// multiple of 1 << Unalign.
      uint8_t Unalign;

      /// PostAlign - When non-zero, the block terminator contains a .align
      /// directive, so the end of the block is aligned to 1 << PostAlign
      /// bytes.
      uint8_t PostAlign;

      BasicBlockInfo() : Offset(0), Size(0), Unalign(0), PostAlign(0) {}

      /// Compute the offset immediately following this block.
      unsigned postOffset() const { return Offset + Size; }
    };

    std::vector<BasicBlockInfo> BBInfo;

    /// WaterList - A sorted list of basic blocks where islands could be placed
    /// (i.e. blocks that don't fall through to the following block, due
    /// to a return, unreachable, or unconditional branch).
    std::vector<MachineBasicBlock*> WaterList;

    /// NewWaterList - The subset of WaterList that was created since the
    /// previous iteration by inserting unconditional branches.
    SmallSet<MachineBasicBlock*, 4> NewWaterList;

    typedef std::vector<MachineBasicBlock*>::iterator water_iterator;

    /// CPUser - One user of a constant pool, keeping the machine instruction
    /// pointer, the constant pool being referenced, and the max displacement
    /// allowed from the instruction to the CP.  The HighWaterMark records the
    /// highest basic block where a new CPEntry can be placed.  To ensure this
    /// pass terminates, the CP entries are initially placed at the end of the
    /// function and then move monotonically to lower addresses.  The
    /// exception to this rule is when the current CP entry for a particular
    /// CPUser is out of range, but there is another CP entry for the same
    /// constant value in range.  We want to use the existing in-range CP
    /// entry, but if it later moves out of range, the search for new water
    /// should resume where it left off.  The HighWaterMark is used to record
    /// that point.
    struct CPUser {
      MachineInstr *MI;
      MachineInstr *CPEMI;
      MachineBasicBlock *HighWaterMark;
      unsigned MaxDisp;
      bool NegOk;
      bool IsSoImm;
      CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
             bool neg, bool soimm)
        : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
        HighWaterMark = CPEMI->getParent();
      }
    };

    /// CPUsers - Keep track of all of the machine instructions that use various
    /// constant pools and their max displacement.
    std::vector<CPUser> CPUsers;

    /// CPEntry - One per constant pool entry, keeping the machine instruction
    /// pointer, the constpool index, and the number of CPUser's which
    /// reference this entry.
    struct CPEntry {
      MachineInstr *CPEMI;
      unsigned CPI;
      unsigned RefCount;
      CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
        : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
    };

    /// CPEntries - Keep track of all of the constant pool entry machine
    /// instructions. For each original constpool index (i.e. those that
    /// existed upon entry to this pass), it keeps a vector of entries.
    /// Original elements are cloned as we go along; the clones are
    /// put in the vector of the original element, but have distinct CPIs.
    std::vector<std::vector<CPEntry> > CPEntries;
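
    // Illustrative note (added commentary): cloning keeps the clones grouped
    // under the original index.  E.g., if original constpool index 1 is
    // cloned twice with fresh PIC label UIDs (say 5 and 7, hypothetical
    // values), the table would look like:
    //
    //   CPEntries[1] == { CPEntry(MI1, 1), CPEntry(MI5, 5), CPEntry(MI7, 7) }
    //
    // so findConstPoolEntry() only has to search one small vector, and
    // clones are always reachable from the index their users started with.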

    /// ImmBranch - One per immediate branch, keeping the machine instruction
    /// pointer, conditional or unconditional, the max displacement,
    /// and (if isCond is true) the corresponding unconditional branch
    /// opcode.
    struct ImmBranch {
      MachineInstr *MI;
      unsigned MaxDisp : 31;
      bool isCond : 1;
      int UncondBr;
      ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, int ubr)
        : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
    };

    /// ImmBranches - Keep track of all the immediate branch instructions.
    ///
    std::vector<ImmBranch> ImmBranches;

    /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
    ///
    SmallVector<MachineInstr*, 4> PushPopMIs;

    /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
    SmallVector<MachineInstr*, 4> T2JumpTables;

    /// HasFarJump - True if any far jump instruction has been emitted during
    /// the branch fix up pass.
    bool HasFarJump;

    /// HasInlineAsm - True if the function contains inline assembly.
    bool HasInlineAsm;

    const ARMInstrInfo *TII;
    const ARMSubtarget *STI;
    ARMFunctionInfo *AFI;
    bool isThumb;
    bool isThumb1;
    bool isThumb2;
  public:
    static char ID;
    ARMConstantIslands() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual const char *getPassName() const {
      return "ARM constant island placement and branch shortening pass";
    }

  private:
    void DoInitialPlacement(MachineFunction &MF,
                            std::vector<MachineInstr*> &CPEMIs);
    CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
    void JumpTableFunctionScan(MachineFunction &MF);
    void InitialFunctionScan(MachineFunction &MF,
                             const std::vector<MachineInstr*> &CPEMIs);
    MachineBasicBlock *SplitBlockBeforeInstr(MachineInstr *MI);
    void UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB);
    void AdjustBBOffsetsAfter(MachineBasicBlock *BB, int delta);
    bool DecrementOldEntry(unsigned CPI, MachineInstr* CPEMI);
    int LookForExistingCPEntry(CPUser& U, unsigned UserOffset);
    bool LookForWater(CPUser&U, unsigned UserOffset, water_iterator &WaterIter);
    void CreateNewWater(unsigned CPUserIndex, unsigned UserOffset,
                        MachineBasicBlock *&NewMBB);
    bool HandleConstantPoolUser(MachineFunction &MF, unsigned CPUserIndex);
    void RemoveDeadCPEMI(MachineInstr *CPEMI);
    bool RemoveUnusedCPEntries();
    bool CPEIsInRange(MachineInstr *MI, unsigned UserOffset,
                      MachineInstr *CPEMI, unsigned Disp, bool NegOk,
                      bool DoDump = false);
    bool WaterIsInRange(unsigned UserOffset, MachineBasicBlock *Water,
                        CPUser &U);
    bool OffsetIsInRange(unsigned UserOffset, unsigned TrialOffset,
                         unsigned Disp, bool NegativeOK, bool IsSoImm = false);
    bool BBIsInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
    bool FixUpImmediateBr(MachineFunction &MF, ImmBranch &Br);
    bool FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br);
    bool FixUpUnconditionalBr(MachineFunction &MF, ImmBranch &Br);
    bool UndoLRSpillRestore();
    bool OptimizeThumb2Instructions(MachineFunction &MF);
    bool OptimizeThumb2Branches(MachineFunction &MF);
    bool ReorderThumb2JumpTables(MachineFunction &MF);
    bool OptimizeThumb2JumpTables(MachineFunction &MF);
    MachineBasicBlock *AdjustJTTargetBlockForward(MachineBasicBlock *BB,
                                                  MachineBasicBlock *JTBB);
    void ComputeBlockSize(const MachineBasicBlock *MBB);
    unsigned GetOffsetOf(MachineInstr *MI) const;
    void dumpBBs();
    void verify(MachineFunction &MF);
  };
  char ARMConstantIslands::ID = 0;
}

/// verify - check BBOffsets, BBSizes, alignment of islands
void ARMConstantIslands::verify(MachineFunction &MF) {
  for (unsigned i = 1, e = BBInfo.size(); i != e; ++i)
    assert(BBInfo[i-1].postOffset() == BBInfo[i].Offset);
  if (!isThumb)
    return;
#ifndef NDEBUG
  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (!MBB->empty() &&
        MBB->begin()->getOpcode() == ARM::CONSTPOOL_ENTRY) {
      unsigned MBBId = MBB->getNumber();
      assert(HasInlineAsm ||
             (BBInfo[MBBId].Offset%4 == 0 && BBInfo[MBBId].Size%4 == 0) ||
             (BBInfo[MBBId].Offset%4 != 0 && BBInfo[MBBId].Size%4 != 0));
    }
  }
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned UserOffset = GetOffsetOf(U.MI) + (isThumb ? 4 : 8);
    unsigned CPEOffset  = GetOffsetOf(U.CPEMI);
    unsigned Disp = UserOffset < CPEOffset ? CPEOffset - UserOffset :
      UserOffset - CPEOffset;
    assert(Disp <= U.MaxDisp && "Constant pool entry out of range!");
  }
#endif
}

/// print block size and offset information - debugging
void ARMConstantIslands::dumpBBs() {
  for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
    DEBUG(errs() << "block " << J << " offset " << BBInfo[J].Offset
                 << " size " << BBInfo[J].Size << "\n");
  }
}

/// createARMConstantIslandPass - returns an instance of the constpool
/// island pass.
FunctionPass *llvm::createARMConstantIslandPass() {
  return new ARMConstantIslands();
}

bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
  MachineConstantPool &MCP = *MF.getConstantPool();

  TII = (const ARMInstrInfo*)MF.getTarget().getInstrInfo();
  AFI = MF.getInfo<ARMFunctionInfo>();
  STI = &MF.getTarget().getSubtarget<ARMSubtarget>();

  isThumb = AFI->isThumbFunction();
  isThumb1 = AFI->isThumb1OnlyFunction();
  isThumb2 = AFI->isThumb2Function();

  HasFarJump = false;
  HasInlineAsm = false;

  // Renumber all of the machine basic blocks in the function, guaranteeing
  // that the numbers agree with the position of the block in the function.
  MF.RenumberBlocks();

  // Try to reorder and otherwise adjust the block layout to make good use
  // of the TB[BH] instructions.
  bool MadeChange = false;
  if (isThumb2 && AdjustJumpTableBlocks) {
    JumpTableFunctionScan(MF);
    MadeChange |= ReorderThumb2JumpTables(MF);
    // Data is out of date, so clear it. It'll be re-computed later.
    T2JumpTables.clear();
    // Blocks may have shifted around. Keep the numbering up to date.
    MF.RenumberBlocks();
  }

  // Thumb1 functions containing constant pools get 4-byte alignment.
  // This is so we can keep exact track of where the alignment padding goes.

  // ARM and Thumb2 functions need to be 4-byte aligned.
  if (!isThumb1)
    MF.EnsureAlignment(2);  // 2 = log2(4)

  // Perform the initial placement of the constant pool entries.  To start
  // with, we put them all at the end of the function.
  std::vector<MachineInstr*> CPEMIs;
  if (!MCP.isEmpty()) {
    DoInitialPlacement(MF, CPEMIs);
    if (isThumb1)
      MF.EnsureAlignment(2);  // 2 = log2(4)
  }
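
  // Illustrative note (added commentary): alignment here is expressed as a
  // log2 value, matching MachineFunction::EnsureAlignment and
  // MachineBasicBlock::setAlignment in this LLVM revision.  E.g.:
  //
  //   MF.EnsureAlignment(2);   // align the function to 1 << 2 == 4 bytes
  //
  // which is why a Thumb1 function is only forced to 4-byte alignment once
  // it actually contains constant pool entries.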
  /// The next UID to take is the first unused one.
  AFI->initPICLabelUId(CPEMIs.size());

  // Do the initial scan of the function, building up information about the
  // sizes of each block, the location of all the water, and finding all of the
  // constant pool users.
  InitialFunctionScan(MF, CPEMIs);
  CPEMIs.clear();
  DEBUG(dumpBBs());


  /// Remove dead constant pool entries.
  MadeChange |= RemoveUnusedCPEntries();

  // Iteratively place constant pool entries and fix up branches until there
  // is no change.
  unsigned NoCPIters = 0, NoBRIters = 0;
  while (true) {
    bool CPChange = false;
    for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
      CPChange |= HandleConstantPoolUser(MF, i);
    if (CPChange && ++NoCPIters > 30)
      llvm_unreachable("Constant Island pass failed to converge!");
    DEBUG(dumpBBs());

    // Clear NewWaterList now.  If we split a block for branches, it should
    // appear as "new water" for the next iteration of constant pool placement.
    NewWaterList.clear();

    bool BRChange = false;
    for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
      BRChange |= FixUpImmediateBr(MF, ImmBranches[i]);
    if (BRChange && ++NoBRIters > 30)
      llvm_unreachable("Branch Fix Up pass failed to converge!");
    DEBUG(dumpBBs());

    if (!CPChange && !BRChange)
      break;
    MadeChange = true;
  }

  // Shrink 32-bit Thumb2 branch, load, and store instructions.
  if (isThumb2 && !STI->prefers32BitThumb())
    MadeChange |= OptimizeThumb2Instructions(MF);

  // After a while, this might be made debug-only, but it is not expensive.
  verify(MF);

  // If LR has been force-spilled and no far jump (i.e. BL) has been issued,
  // undo the spill / restore of LR if possible.
  if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
    MadeChange |= UndoLRSpillRestore();

  // Save the mapping between original and cloned constpool entries.
  for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
    for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
      const CPEntry & CPE = CPEntries[i][j];
      AFI->recordCPEClone(i, CPE.CPI);
    }
  }

  DEBUG(errs() << '\n'; dumpBBs());

  BBInfo.clear();
  WaterList.clear();
  CPUsers.clear();
  CPEntries.clear();
  ImmBranches.clear();
  PushPopMIs.clear();
  T2JumpTables.clear();

  return MadeChange;
}

/// DoInitialPlacement - Perform the initial placement of the constant pool
/// entries.  To start with, we put them all at the end of the function.
void ARMConstantIslands::DoInitialPlacement(MachineFunction &MF,
                                        std::vector<MachineInstr*> &CPEMIs) {
  // Create the basic block to hold the CPE's.
  MachineBasicBlock *BB = MF.CreateMachineBasicBlock();
  MF.push_back(BB);

  // Mark the basic block as 4-byte aligned as required by the const-pool.
  BB->setAlignment(2);

  // Add all of the constants from the constant pool to the end block, use an
  // identity mapping of CPI's to CPE's.
  const std::vector<MachineConstantPoolEntry> &CPs =
    MF.getConstantPool()->getConstants();

  const TargetData &TD = *MF.getTarget().getTargetData();
  for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
    unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
    // Verify that all constant pool entries are a multiple of 4 bytes.  If
    // not, we would have to pad them out or something so that instructions
    // stay aligned.
    assert((Size & 3) == 0 && "CP Entry not multiple of 4 bytes!");
    MachineInstr *CPEMI =
      BuildMI(BB, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
        .addImm(i).addConstantPoolIndex(i).addImm(Size);
    CPEMIs.push_back(CPEMI);

    // Add a new CPEntry, but no corresponding CPUser yet.
    std::vector<CPEntry> CPEs;
    CPEs.push_back(CPEntry(CPEMI, i));
    CPEntries.push_back(CPEs);
    ++NumCPEs;
    DEBUG(errs() << "Moved CPI#" << i << " to end of function as #" << i
                 << "\n");
  }
}

/// BBHasFallthrough - Return true if the specified basic block can fallthrough
/// into the block immediately after it.
static bool BBHasFallthrough(MachineBasicBlock *MBB) {
  // Get the next machine basic block in the function.
  MachineFunction::iterator MBBI = MBB;
  // Can't fall off end of function.
  if (llvm::next(MBBI) == MBB->getParent()->end())
    return false;

  MachineBasicBlock *NextBB = llvm::next(MBBI);
  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
       E = MBB->succ_end(); I != E; ++I)
    if (*I == NextBB)
      return true;

  return false;
}

/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
/// look up the corresponding CPEntry.
ARMConstantIslands::CPEntry
*ARMConstantIslands::findConstPoolEntry(unsigned CPI,
                                        const MachineInstr *CPEMI) {
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  // Number of entries per constpool index should be small, just do a
  // linear search.
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    if (CPEs[i].CPEMI == CPEMI)
      return &CPEs[i];
  }
  return NULL;
}

/// JumpTableFunctionScan - Do a scan of the function, building up
/// information about the sizes of each block and the locations of all
/// the jump tables.
void ARMConstantIslands::JumpTableFunctionScan(MachineFunction &MF) {
  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I)
      if (I->getDesc().isBranch() && I->getOpcode() == ARM::t2BR_JT)
        T2JumpTables.push_back(I);
  }
}

/// InitialFunctionScan - Do the initial scan of the function, building up
/// information about the sizes of each block, the location of all the water,
/// and finding all of the constant pool users.
void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
                                 const std::vector<MachineInstr*> &CPEMIs) {
  // First thing, see if the function has any inline assembly in it. If so,
  // we have to be conservative about alignment assumptions, as we don't
  // know for sure the size of any instructions in the inline assembly.
  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I)
      if (I->getOpcode() == ARM::INLINEASM)
        HasInlineAsm = true;
  }

  BBInfo.clear();
  BBInfo.resize(MF.getNumBlockIDs());
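
  // Illustrative note (added commentary): BBInfo is indexed by MBB number,
  // which is why runOnMachineFunction calls MF.RenumberBlocks() before this
  // scan; after that, a block's record is simply:
  //
  //   BasicBlockInfo &BBI = BBInfo[MBB.getNumber()];
  //
  // Anything that inserts or reorders blocks must renumber and re-sync this
  // vector (see UpdateForInsertedWaterBlock below).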
  // Now go back through the instructions and build up our data structures.
  unsigned Offset = 0;
  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;
    BasicBlockInfo &BBI = BBInfo[MBB.getNumber()];
    BBI.Offset = Offset;

    // If this block doesn't fall through into the next MBB, then this is
    // 'water' where a constant pool island could be placed.
    if (!BBHasFallthrough(&MBB))
      WaterList.push_back(&MBB);

    unsigned MBBSize = 0;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      if (I->isDebugValue())
        continue;
      // Add instruction size to MBBSize.
      MBBSize += TII->GetInstSizeInBytes(I);

      // For inline asm, GetInstSizeInBytes returns a conservative estimate.
      // The actual size may be smaller, but still a multiple of the instr
      // size.
      if (I->isInlineAsm())
        BBI.Unalign = isThumb ? 1 : 2;

      int Opc = I->getOpcode();
      if (I->getDesc().isBranch()) {
        bool isCond = false;
        unsigned Bits = 0;
        unsigned Scale = 1;
        int UOpc = Opc;
        switch (Opc) {
        default:
          continue;  // Ignore other JT branches
        case ARM::tBR_JTr:
          // A Thumb1 table jump may involve padding; for the offsets to
          // be right, functions containing these must be 4-byte aligned.
          // tBR_JTr expands to a mov pc followed by .align 2 and then the jump
          // table entries. So this code checks whether offset of tBR_JTr + 2
          // is aligned.  That is held in Offset+MBBSize, which already has
          // 2 added in for the size of the mov pc instruction.
          MF.EnsureAlignment(2U);
          BBI.PostAlign = 2;
          if ((Offset+MBBSize)%4 != 0 || HasInlineAsm)
            // FIXME: Add a pseudo ALIGN instruction instead.
            MBBSize += 2;           // padding
          continue;   // Does not get an entry in ImmBranches
        case ARM::t2BR_JT:
          T2JumpTables.push_back(I);
          continue;   // Does not get an entry in ImmBranches
        case ARM::Bcc:
          isCond = true;
          UOpc = ARM::B;
          // Fallthrough
        case ARM::B:
          Bits = 24;
          Scale = 4;
          break;
        case ARM::tBcc:
          isCond = true;
          UOpc = ARM::tB;
          Bits = 8;
          Scale = 2;
          break;
        case ARM::tB:
          Bits = 11;
          Scale = 2;
          break;
        case ARM::t2Bcc:
          isCond = true;
          UOpc = ARM::t2B;
          Bits = 20;
          Scale = 2;
          break;
        case ARM::t2B:
          Bits = 24;
          Scale = 2;
          break;
        }

        // Record this immediate branch.
        unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
        ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
      }

      if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
        PushPopMIs.push_back(I);

      if (Opc == ARM::CONSTPOOL_ENTRY)
        continue;

      // Scan the instructions for constant pool operands.
      for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
        if (I->getOperand(op).isCPI()) {
          // We found one.  The addressing mode tells us the max displacement
          // from the PC that this instruction permits.

          // Basic size info comes from the TSFlags field.
          unsigned Bits = 0;
          unsigned Scale = 1;
          bool NegOk = false;
          bool IsSoImm = false;

          switch (Opc) {
          default:
            llvm_unreachable("Unknown addressing mode for CP reference!");
            break;

          // Taking the address of a CP entry.
          case ARM::LEApcrel:
            // This takes a SoImm, which is an 8-bit rotated immediate.  We'll
            // pretend the maximum offset is 255 * 4.  Since each instruction
            // is 4 bytes wide, this is always correct.  We'll check for other
            // displacements that fit in a SoImm as well.
            Bits = 8;
            Scale = 4;
            NegOk = true;
            IsSoImm = true;
            break;
          case ARM::t2LEApcrel:
            Bits = 12;
            NegOk = true;
            break;
          case ARM::tLEApcrel:
            Bits = 8;
            Scale = 4;
            break;

          case ARM::LDRi12:
          case ARM::LDRcp:
          case ARM::t2LDRpci:
            Bits = 12;  // +-offset_12
            NegOk = true;
            break;

          case ARM::tLDRpci:
            Bits = 8;
            Scale = 4;  // +(offset_8*4)
            break;

          case ARM::VLDRD:
          case ARM::VLDRS:
            Bits = 8;
            Scale = 4;  // +-(offset_8*4)
            NegOk = true;
            break;
          }

          // Remember that this is a user of a CP entry.
          unsigned CPI = I->getOperand(op).getIndex();
          MachineInstr *CPEMI = CPEMIs[CPI];
          unsigned MaxOffs = ((1 << Bits)-1) * Scale;
          CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));

          // Increment corresponding CPEntry reference count.
          CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
          assert(CPE && "Cannot find a corresponding CPEntry!");
          CPE->RefCount++;

          // Instructions can only use one CP entry, don't bother scanning the
          // rest of the operands.
          break;
        }
    }

    // In thumb mode, if this block is a constpool island, we may need padding
    // so it's aligned on 4 byte boundary.
    if (isThumb &&
        !MBB.empty() &&
        MBB.begin()->getOpcode() == ARM::CONSTPOOL_ENTRY &&
        ((Offset%4) != 0 || HasInlineAsm))
      MBBSize += 2;

    BBI.Size = MBBSize;
    Offset += MBBSize;
  }
}

/// ComputeBlockSize - Compute the size and some alignment information for MBB.
/// This function updates BBInfo directly.
void ARMConstantIslands::ComputeBlockSize(const MachineBasicBlock *MBB) {
  BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
  BBI.Size = 0;
  BBI.Unalign = 0;
  BBI.PostAlign = 0;

  for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
       I != E; ++I) {
    BBI.Size += TII->GetInstSizeInBytes(I);
    // For inline asm, GetInstSizeInBytes returns a conservative estimate.
    // The actual size may be smaller, but still a multiple of the instr size.
    if (I->isInlineAsm())
      BBI.Unalign = isThumb ? 1 : 2;
  }

  // tBR_JTr contains a .align 2 directive.
  if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr)
    BBI.PostAlign = 2;
}

/// GetOffsetOf - Return the current offset of the specified machine
/// instruction from the start of the function.  This offset changes as stuff
/// is moved around inside the function.
unsigned ARMConstantIslands::GetOffsetOf(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();

  // The offset is composed of two things: the sum of the sizes of all MBB's
  // before this instruction's block, and the offset from the start of the
  // block it is in.
  unsigned Offset = BBInfo[MBB->getNumber()].Offset;

  // If we're looking for a CONSTPOOL_ENTRY in Thumb, see if this block has
  // alignment padding, and compensate if so.
  if (isThumb &&
      MI->getOpcode() == ARM::CONSTPOOL_ENTRY &&
      (Offset%4 != 0 || HasInlineAsm))
    Offset += 2;

  // Sum instructions before MI in MBB.
  for (MachineBasicBlock::iterator I = MBB->begin(); ; ++I) {
    assert(I != MBB->end() && "Didn't find MI in its own basic block?");
    if (&*I == MI) return Offset;
    Offset += TII->GetInstSizeInBytes(I);
  }
}
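
// Illustrative note (added commentary): the Bits/Scale pairs recorded in
// InitialFunctionScan translate into byte ranges.  Branches use the signed
// form ((1 << (Bits - 1)) - 1) * Scale, e.g.:
//
//   unsigned ARM_B_Range = ((1 << 23) - 1) * 4;  // Bits=24, Scale=4: ~32MB
//   unsigned tBcc_Range  = ((1 << 7)  - 1) * 2;  // Bits=8,  Scale=2: 254B
//   unsigned t2B_Range   = ((1 << 23) - 1) * 2;  // Bits=24, Scale=2: ~16MB
//
// while constant pool loads use the unsigned form ((1 << Bits) - 1) * Scale,
// e.g. tLDRpci: ((1 << 8) - 1) * 4 == 1020 bytes of forward range (NegOk is
// false for it, so there is no backward range at all).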

/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
/// ID.
static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
                              const MachineBasicBlock *RHS) {
  return LHS->getNumber() < RHS->getNumber();
}

/// UpdateForInsertedWaterBlock - When a block is newly inserted into the
/// machine function, it upsets all of the block numbers.  Renumber the blocks
/// and update the arrays that parallel this numbering.
void ARMConstantIslands::UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
  // Renumber the MBB's to keep them consecutive.
  NewBB->getParent()->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList.  Specifically, we need to add NewMBB as having
  // available water after it.
  water_iterator IP =
    std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
                     CompareMBBNumbers);
  WaterList.insert(IP, NewBB);
}


/// Split the basic block containing MI into two blocks, which are joined by
/// an unconditional branch.  Update data structures and renumber blocks to
/// account for this change and return the newly created block.
MachineBasicBlock *ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
  MachineBasicBlock *OrigBB = MI->getParent();
  MachineFunction &MF = *OrigBB->getParent();

  // Create a new MBB for the code after the OrigBB.
  MachineBasicBlock *NewBB =
    MF.CreateMachineBasicBlock(OrigBB->getBasicBlock());
  MachineFunction::iterator MBBI = OrigBB; ++MBBI;
  MF.insert(MBBI, NewBB);

  // Splice the instructions starting with MI over to NewBB.
  NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());

  // Add an unconditional branch from OrigBB to NewBB.
  // Note the new unconditional branch is not being recorded.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond to anything in the source.
  unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
  if (!isThumb)
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
  else
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB)
            .addImm(ARMCC::AL).addReg(0);
  ++NumSplit;

  // Update the CFG.  All succs of OrigBB are now succs of NewBB.
  NewBB->transferSuccessors(OrigBB);

  // OrigBB branches to NewBB.
  OrigBB->addSuccessor(NewBB);

  // Update internal data structures to account for the newly inserted MBB.
  // This is almost the same as UpdateForInsertedWaterBlock, except that
  // the Water goes after OrigBB, not NewBB.
  MF.RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList.  Specifically, we need to add OrigMBB as having
  // available water after it (but not if it's already there, which happens
  // when splitting before a conditional branch that is followed by an
  // unconditional branch - in that case we want to insert NewBB).
  water_iterator IP =
    std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
                     CompareMBBNumbers);
  MachineBasicBlock* WaterBB = *IP;
  if (WaterBB == OrigBB)
    WaterList.insert(llvm::next(IP), NewBB);
  else
    WaterList.insert(IP, OrigBB);
  NewWaterList.insert(OrigBB);

  unsigned OrigBBI = OrigBB->getNumber();
  unsigned NewBBI = NewBB->getNumber();

  int delta = isThumb1 ? 2 : 4;

  // Figure out how large the OrigBB is.  As the first half of the original
  // block, it cannot contain a tablejump.  The size includes
  // the new jump we added.  (It should be possible to do this without
  // recounting everything, but it's very confusing, and this is rarely
  // executed.)
  ComputeBlockSize(OrigBB);

  // ...and adjust BBOffsets for NewBB accordingly.
  BBInfo[NewBBI].Offset = BBInfo[OrigBBI].postOffset();

  // Figure out how large the NewMBB is.  As the second half of the original
  // block, it may contain a tablejump.
  ComputeBlockSize(NewBB);

  MachineInstr* ThumbJTMI = prior(NewBB->end());
  if (ThumbJTMI->getOpcode() == ARM::tBR_JTr) {
    // We've added another 2-byte instruction before this tablejump, which
    // means we will always need padding if we didn't before, and vice versa.

    // The original offset of the jump instruction was:
    unsigned OrigOffset = BBInfo[OrigBBI].postOffset() - delta;
    if (OrigOffset%4 == 0) {
      // We had padding before and now we don't.  No net change in code size.
      delta = 0;
    } else {
      // We didn't have padding before and now we do.
      BBInfo[NewBBI].Size += 2;
      delta = 4;
    }
  }

  // All BBOffsets following these blocks must be modified.
  if (delta)
    AdjustBBOffsetsAfter(NewBB, delta);

  return NewBB;
}

/// OffsetIsInRange - Checks whether UserOffset (the location of a constant
/// pool reference) is within MaxDisp of TrialOffset (a proposed location of a
/// constant pool entry).
bool ARMConstantIslands::OffsetIsInRange(unsigned UserOffset,
                                         unsigned TrialOffset, unsigned MaxDisp,
                                         bool NegativeOK, bool IsSoImm) {
  // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
  // purposes of the displacement computation; compensate for that here.
  // Effectively, the valid range of displacements is 2 bytes smaller for such
  // references.
  unsigned TotalAdj = 0;
  if (isThumb && UserOffset%4 != 0) {
    UserOffset -= 2;
    TotalAdj = 2;
  }
  // CPEs will be rounded up to a multiple of 4.
  if (isThumb && TrialOffset%4 != 0) {
    TrialOffset += 2;
    TotalAdj += 2;
  }

  // In Thumb2 mode, later branch adjustments can shift instructions up and
  // cause alignment change. In the worst case scenario this can cause the
  // user's effective address to be subtracted by 2 and the CPE's address to
  // be plus 2.
  if (isThumb2 && TotalAdj != 4)
    MaxDisp -= (4 - TotalAdj);

  if (UserOffset <= TrialOffset) {
    // User before the Trial.
    if (TrialOffset - UserOffset <= MaxDisp)
      return true;
    // FIXME: Make use of the full range of soimm values.
  } else if (NegativeOK) {
    if (UserOffset - TrialOffset <= MaxDisp)
      return true;
    // FIXME: Make use of the full range of soimm values.
  }
  return false;
}
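
// Illustrative note (added commentary): a worked example of the rounding
// above, for a Thumb1 tLDRpci user (MaxDisp == 1020, NegOk == false), with
// hypothetical offsets:
//
//   // UserOffset == 102 (==2 mod 4) reads as 100 to the hardware;
//   // TrialOffset == 1118 (==2 mod 4) is rounded up to 1120 by padding.
//   // 1120 - 100 == 1020 <= MaxDisp, so the placement is still in range.
//
// The point is that both adjustments must be applied before the subtraction.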

/// WaterIsInRange - Returns true if a CPE placed after the specified
/// Water (a basic block) will be in range for the specific MI.
bool ARMConstantIslands::WaterIsInRange(unsigned UserOffset,
                                        MachineBasicBlock* Water, CPUser &U) {
  unsigned MaxDisp = U.MaxDisp;
  unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset();

  // If the CPE is to be inserted before the instruction, that will raise
  // the offset of the instruction.
  if (CPEOffset < UserOffset)
    UserOffset += U.CPEMI->getOperand(2).getImm();

  return OffsetIsInRange(UserOffset, CPEOffset, MaxDisp, U.NegOk, U.IsSoImm);
}

/// CPEIsInRange - Returns true if the distance between specific MI and
/// specific ConstPool entry instruction can fit in MI's displacement field.
bool ARMConstantIslands::CPEIsInRange(MachineInstr *MI, unsigned UserOffset,
                                      MachineInstr *CPEMI, unsigned MaxDisp,
                                      bool NegOk, bool DoDump) {
  unsigned CPEOffset  = GetOffsetOf(CPEMI);
  assert((CPEOffset%4 == 0 || HasInlineAsm) && "Misaligned CPE");

  if (DoDump) {
    DEBUG(errs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
                 << " max delta=" << MaxDisp
                 << " insn address=" << UserOffset
                 << " CPE address=" << CPEOffset
                 << " offset=" << int(CPEOffset-UserOffset) << "\t" << *MI);
  }

  return OffsetIsInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
}

#ifndef NDEBUG
/// BBIsJumpedOver - Return true if the specified basic block's only
/// predecessor unconditionally branches to its only successor.
static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
  if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
    return false;

  MachineBasicBlock *Succ = *MBB->succ_begin();
  MachineBasicBlock *Pred = *MBB->pred_begin();
  MachineInstr *PredMI = &Pred->back();
  if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
      || PredMI->getOpcode() == ARM::t2B)
    return PredMI->getOperand(0).getMBB() == Succ;
  return false;
}
#endif // NDEBUG

void ARMConstantIslands::AdjustBBOffsetsAfter(MachineBasicBlock *BB,
                                              int delta) {
  MachineFunction::iterator MBBI = BB; MBBI = llvm::next(MBBI);
  for(unsigned i = BB->getNumber()+1, e = BB->getParent()->getNumBlockIDs();
      i < e; ++i) {
    BBInfo[i].Offset += delta;
    // If some existing blocks have padding, adjust the padding as needed, a
    // bit tricky.  delta can be negative so don't use % on that.
    if (!isThumb)
      continue;
    MachineBasicBlock *MBB = MBBI;
    if (!MBB->empty() && !HasInlineAsm) {
      // Constant pool entries require padding.
      if (MBB->begin()->getOpcode() == ARM::CONSTPOOL_ENTRY) {
        unsigned OldOffset = BBInfo[i].Offset - delta;
        if ((OldOffset%4) == 0 && (BBInfo[i].Offset%4) != 0) {
          // add new padding
          BBInfo[i].Size += 2;
          delta += 2;
        } else if ((OldOffset%4) != 0 && (BBInfo[i].Offset%4) == 0) {
          // remove existing padding
          BBInfo[i].Size -= 2;
          delta -= 2;
        }
      }
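
      // Illustrative note (added commentary): e.g. a delta of +2 that moves
      // an island from Offset 8 (aligned, no pad) to Offset 10 means a
      // 2-byte pad now precedes the entries, so Size grows by 2 and delta
      // becomes +4 for all later blocks; the opposite move shrinks it back.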
      // Thumb1 jump tables require padding.  They should be at the end;
      // following unconditional branches are removed by AnalyzeBranch.
      // tBR_JTr expands to a mov pc followed by .align 2 and then the jump
      // table entries. So this code checks whether offset of tBR_JTr
      // is aligned; if it is, the offset of the jump table following the
      // instruction will not be aligned, and we need padding.
      MachineInstr *ThumbJTMI = prior(MBB->end());
      if (ThumbJTMI->getOpcode() == ARM::tBR_JTr) {
        unsigned NewMIOffset = GetOffsetOf(ThumbJTMI);
        unsigned OldMIOffset = NewMIOffset - delta;
        if ((OldMIOffset%4) == 0 && (NewMIOffset%4) != 0) {
          // remove existing padding
          BBInfo[i].Size -= 2;
          delta -= 2;
        } else if ((OldMIOffset%4) != 0 && (NewMIOffset%4) == 0) {
          // add new padding
          BBInfo[i].Size += 2;
          delta += 2;
        }
      }
      if (delta==0)
        return;
    }
    MBBI = llvm::next(MBBI);
  }
}

/// DecrementOldEntry - find the constant pool entry with index CPI
/// and instruction CPEMI, and decrement its refcount.  If the refcount
/// becomes 0 remove the entry and instruction.  Returns true if we removed
/// the entry, false if we didn't.
bool ARMConstantIslands::DecrementOldEntry(unsigned CPI, MachineInstr *CPEMI) {
  // Find the old entry. Eliminate it if it is no longer used.
  CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
  assert(CPE && "Unexpected!");
  if (--CPE->RefCount == 0) {
    RemoveDeadCPEMI(CPEMI);
    CPE->CPEMI = NULL;
    --NumCPEs;
    return true;
  }
  return false;
}

/// LookForExistingCPEntry - see if the currently referenced CPE is in range;
/// if not, see if an in-range clone of the CPE is in range, and if so,
/// change the data structures so the user references the clone.  Returns:
/// 0 = no existing entry found
/// 1 = entry found, and there were no code insertions or deletions
/// 2 = entry found, and there were code insertions or deletions
int ARMConstantIslands::LookForExistingCPEntry(CPUser& U, unsigned UserOffset)
{
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI  = U.CPEMI;

  // Check to see if the CPE is already in-range.
  if (CPEIsInRange(UserMI, UserOffset, CPEMI, U.MaxDisp, U.NegOk, true)) {
    DEBUG(errs() << "In range\n");
    return 1;
  }

  // No.  Look for previously created clones of the CPE that are in range.
  unsigned CPI = CPEMI->getOperand(1).getIndex();
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    // We already tried this one
    if (CPEs[i].CPEMI == CPEMI)
      continue;
    // Removing CPEs can leave empty entries, skip
    if (CPEs[i].CPEMI == NULL)
      continue;
    if (CPEIsInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.MaxDisp, U.NegOk)) {
      DEBUG(errs() << "Replacing CPE#" << CPI << " with CPE#"
                   << CPEs[i].CPI << "\n");
      // Point the CPUser node to the replacement
      U.CPEMI = CPEs[i].CPEMI;
      // Change the CPI in the instruction operand to refer to the clone.
      for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
        if (UserMI->getOperand(j).isCPI()) {
          UserMI->getOperand(j).setIndex(CPEs[i].CPI);
          break;
        }
      // Adjust the refcount of the clone...
      CPEs[i].RefCount++;
      // ...and the original.  If we didn't remove the old entry, none of the
      // addresses changed, so we don't need another pass.
      return DecrementOldEntry(CPI, CPEMI) ? 2 : 1;
    }
  }
  return 0;
}
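
// Illustrative note (added commentary): callers treat the tri-state result
// as "done / done but addresses moved / keep going", as in
// HandleConstantPoolUser below:
//
//   int result = LookForExistingCPEntry(U, UserOffset);
//   if (result == 1) return false;      // in range, nothing changed
//   else if (result == 2) return true;  // reused a clone, offsets changed
//   // result == 0: fall through and create a new clone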

/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
/// the specific unconditional branch instruction.
static inline unsigned getUnconditionalBrDisp(int Opc) {
  switch (Opc) {
  case ARM::tB:
    return ((1<<10)-1)*2;
  case ARM::t2B:
    return ((1<<23)-1)*2;
  default:
    break;
  }

  return ((1<<23)-1)*4;
}

/// LookForWater - Look for an existing entry in the WaterList in which
/// we can place the CPE referenced from U so it's within range of U's MI.
/// Returns true if found, false if not.  If it returns true, WaterIter
/// is set to the WaterList entry.  For Thumb, prefer water that will not
/// introduce padding to water that will.  To ensure that this pass
/// terminates, the CPE location for a particular CPUser is only allowed to
/// move to a lower address, so search backward from the end of the list and
/// prefer the first water that is in range.
bool ARMConstantIslands::LookForWater(CPUser &U, unsigned UserOffset,
                                      water_iterator &WaterIter) {
  if (WaterList.empty())
    return false;

  bool FoundWaterThatWouldPad = false;
  water_iterator IPThatWouldPad;
  for (water_iterator IP = prior(WaterList.end()),
         B = WaterList.begin();; --IP) {
    MachineBasicBlock* WaterBB = *IP;
    // Check if water is in range and is either at a lower address than the
    // current "high water mark" or a new water block that was created since
    // the previous iteration by inserting an unconditional branch.  In the
    // latter case, we want to allow resetting the high water mark back to
    // this new water since we haven't seen it before.  Inserting branches
    // should be relatively uncommon and when it does happen, we want to be
    // sure to take advantage of it for all the CPEs near that block, so that
    // we don't insert more branches than necessary.
    if (WaterIsInRange(UserOffset, WaterBB, U) &&
        (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
         NewWaterList.count(WaterBB))) {
      unsigned WBBId = WaterBB->getNumber();
      if (isThumb && BBInfo[WBBId].postOffset()%4 != 0) {
        // This is valid Water, but would introduce padding.  Remember
        // it in case we don't find any Water that doesn't do this.
        if (!FoundWaterThatWouldPad) {
          FoundWaterThatWouldPad = true;
          IPThatWouldPad = IP;
        }
      } else {
        WaterIter = IP;
        return true;
      }
    }
    if (IP == B)
      break;
  }
  if (FoundWaterThatWouldPad) {
    WaterIter = IPThatWouldPad;
    return true;
  }
  return false;
}

/// CreateNewWater - No existing WaterList entry will work for
/// CPUsers[CPUserIndex], so create a place to put the CPE.  The end of the
/// block is used if in range, and the conditional branch munged so control
/// flow is correct.  Otherwise the block is split to create a hole with an
/// unconditional branch around it.  In either case NewMBB is set to a
/// block following which the new island can be inserted (the WaterList
/// is not adjusted).
void ARMConstantIslands::CreateNewWater(unsigned CPUserIndex,
                                        unsigned UserOffset,
                                        MachineBasicBlock *&NewMBB) {
  CPUser &U = CPUsers[CPUserIndex];
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI  = U.CPEMI;
  MachineBasicBlock *UserMBB = UserMI->getParent();
  unsigned OffsetOfNextBlock = BBInfo[UserMBB->getNumber()].postOffset();
  assert(OffsetOfNextBlock == BBInfo[UserMBB->getNumber()+1].Offset);

  // If the block does not end in an unconditional branch already, and if the
  // end of the block is within range, make new water there.  (The addition
  // below is for the unconditional branch we will be adding: 4 bytes on ARM +
  // Thumb2, 2 on Thumb1.  Possible Thumb1 alignment padding is allowed for
  // inside OffsetIsInRange.)
  if (BBHasFallthrough(UserMBB) &&
      OffsetIsInRange(UserOffset, OffsetOfNextBlock + (isThumb1 ? 2 : 4),
                      U.MaxDisp, U.NegOk, U.IsSoImm)) {
    DEBUG(errs() << "Split at end of block\n");
    if (&UserMBB->back() == UserMI)
      assert(BBHasFallthrough(UserMBB) && "Expected a fallthrough BB!");
    NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
    // Add an unconditional branch from UserMBB to fallthrough block.
    // Record it for branch lengthening; this new branch will not get out of
    // range, but if the preceding conditional branch is out of range, the
    // targets will be exchanged, and the altered branch may be out of
    // range, so the machinery has to know about it.
    int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
    if (!isThumb)
      BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
    else
      BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB)
              .addImm(ARMCC::AL).addReg(0);
    unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
    ImmBranches.push_back(ImmBranch(&UserMBB->back(),
                                    MaxDisp, false, UncondBr));
    int delta = isThumb1 ? 2 : 4;
    BBInfo[UserMBB->getNumber()].Size += delta;
    AdjustBBOffsetsAfter(UserMBB, delta);
  } else {
    // What a big block.  Find a place within the block to split it.
    // This is a little tricky on Thumb1 since instructions are 2 bytes
    // and constant pool entries are 4 bytes: if instruction I references
    // island CPE, and instruction I+1 references CPE', it will
    // not work well to put CPE as far forward as possible, since then
    // CPE' cannot immediately follow it (that location is 2 bytes
    // farther away from I+1 than CPE was from I) and we'd need to create
    // a new island.  So, we make a first guess, then walk through the
    // instructions between the one currently being looked at and the
    // possible insertion point, and make sure any other instructions
    // that reference CPEs will be able to use the same island area;
    // if not, we back up the insertion point.

    // The 4 in the following is for the unconditional branch we'll be
    // inserting (allows for long branch on Thumb1).  Alignment of the
    // island is handled inside OffsetIsInRange.
    unsigned BaseInsertOffset = UserOffset + U.MaxDisp - 4;
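    // Illustrative note (added commentary): e.g. for a Thumb1 tLDRpci user
    // at a hypothetical UserOffset of 100 with MaxDisp == 1020, the first
    // guess is BaseInsertOffset == 100 + 1020 - 4 == 1116, leaving room for
    // the unconditional branch that will jump over the new island.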
    // This could point off the end of the block if we've already got
    // constant pool entries following this block; only the last one is
    // in the water list.  Back past any possible branches (allow for a
    // conditional and a maximally long unconditional).
    if (BaseInsertOffset >= BBInfo[UserMBB->getNumber()+1].Offset)
      BaseInsertOffset = BBInfo[UserMBB->getNumber()+1].Offset -
                         (isThumb1 ? 6 : 8);
    unsigned EndInsertOffset = BaseInsertOffset +
      CPEMI->getOperand(2).getImm();
    MachineBasicBlock::iterator MI = UserMI;
    ++MI;
    unsigned CPUIndex = CPUserIndex+1;
    unsigned NumCPUsers = CPUsers.size();
    MachineInstr *LastIT = 0;
    for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
         Offset < BaseInsertOffset;
         Offset += TII->GetInstSizeInBytes(MI),
         MI = llvm::next(MI)) {
      if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
        CPUser &U = CPUsers[CPUIndex];
        if (!OffsetIsInRange(Offset, EndInsertOffset,
                             U.MaxDisp, U.NegOk, U.IsSoImm)) {
          BaseInsertOffset -= (isThumb1 ? 2 : 4);
          EndInsertOffset  -= (isThumb1 ? 2 : 4);
        }
        // This is overly conservative, as we don't account for CPEMIs
        // being reused within the block, but it doesn't matter much.
        EndInsertOffset += CPUsers[CPUIndex].CPEMI->getOperand(2).getImm();
        CPUIndex++;
      }

      // Remember the last IT instruction.
      if (MI->getOpcode() == ARM::t2IT)
        LastIT = MI;
    }

    DEBUG(errs() << "Split in middle of big block\n");
    --MI;

    // Avoid splitting an IT block.
    if (LastIT) {
      unsigned PredReg = 0;
      ARMCC::CondCodes CC = llvm::getITInstrPredicate(MI, PredReg);
      if (CC != ARMCC::AL)
        MI = LastIT;
    }
    NewMBB = SplitBlockBeforeInstr(MI);
  }
}

/// HandleConstantPoolUser - Analyze the specified user, checking to see if it
/// is out-of-range.  If so, pick up the constant pool value and move it some
/// place in-range.  Return true if we changed any addresses (thus must run
/// another pass of branch lengthening), false otherwise.
bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
                                                unsigned CPUserIndex) {
  CPUser &U = CPUsers[CPUserIndex];
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI  = U.CPEMI;
  unsigned CPI = CPEMI->getOperand(1).getIndex();
  unsigned Size = CPEMI->getOperand(2).getImm();
  // Compute this only once, it's expensive.  The 4 or 8 is the value the
  // hardware keeps in the PC.
  unsigned UserOffset = GetOffsetOf(UserMI) + (isThumb ? 4 : 8);

  // See if the current entry is within range, or there is a clone of it
  // in range.
  int result = LookForExistingCPEntry(U, UserOffset);
  if (result==1) return false;
  else if (result==2) return true;

  // No existing clone of this CPE is within range.
  // We will be generating a new clone.  Get a UID for it.
  unsigned ID = AFI->createPICLabelUId();

  // Look for water where we can place this CPE.
  MachineBasicBlock *NewIsland = MF.CreateMachineBasicBlock();
  MachineBasicBlock *NewMBB;
  water_iterator IP;
  if (LookForWater(U, UserOffset, IP)) {
    DEBUG(errs() << "found water in range\n");
    MachineBasicBlock *WaterBB = *IP;

    // If the original WaterList entry was "new water" on this iteration,
    // propagate that to the new island.  This is just keeping NewWaterList
    // updated to match the WaterList, which will be updated below.
    if (NewWaterList.count(WaterBB)) {
      NewWaterList.erase(WaterBB);
      NewWaterList.insert(NewIsland);
    }
    // The new CPE goes before the following block (NewMBB).
    NewMBB = llvm::next(MachineFunction::iterator(WaterBB));

  } else {
    // No water found.
    DEBUG(errs() << "No water found\n");
    CreateNewWater(CPUserIndex, UserOffset, NewMBB);

    // SplitBlockBeforeInstr adds to WaterList, which is important when it is
    // called while handling branches so that the water will be seen on the
    // next iteration for constant pools, but in this context, we don't want
    // it.  Check for this so it will be removed from the WaterList.
    // Also remove any entry from NewWaterList.
    MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB));
    IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
    if (IP != WaterList.end())
      NewWaterList.erase(WaterBB);

    // We are adding new water.  Update NewWaterList.
    NewWaterList.insert(NewIsland);
  }

  // Remove the original WaterList entry; we want subsequent insertions in
  // this vicinity to go after the one we're about to insert.  This
  // considerably reduces the number of times we have to move the same CPE
  // more than once and is also important to ensure the algorithm terminates.
  if (IP != WaterList.end())
    WaterList.erase(IP);

  // Okay, we know we can put an island before NewMBB now, do it!
  MF.insert(NewMBB, NewIsland);

  // Update internal data structures to account for the newly inserted MBB.
  UpdateForInsertedWaterBlock(NewIsland);

  // Decrement the old entry, and remove it if refcount becomes 0.
  DecrementOldEntry(CPI, CPEMI);

  // Now that we have an island to add the CPE to, clone the original CPE and
  // add it to the island.
  U.HighWaterMark = NewIsland;
  U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
                .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
  CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
  ++NumCPEs;

  // Mark the basic block as 4-byte aligned as required by the const-pool
  // entry.
  NewIsland->setAlignment(2);

  BBInfo[NewIsland->getNumber()].Offset = BBInfo[NewMBB->getNumber()].Offset;
  // Compensate for .align 2 in thumb mode.
  if (isThumb &&
      (BBInfo[NewIsland->getNumber()].Offset%4 != 0 || HasInlineAsm))
    Size += 2;
  // Increase the size of the island block to account for the new entry.
  BBInfo[NewIsland->getNumber()].Size += Size;
  AdjustBBOffsetsAfter(NewIsland, Size);

  // Finally, change the CPI in the instruction operand to be ID.
  for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
    if (UserMI->getOperand(i).isCPI()) {
      UserMI->getOperand(i).setIndex(ID);
      break;
    }

  DEBUG(errs() << "  Moved CPE to #" << ID << " CPI=" << CPI
               << '\t' << *UserMI);

  return true;
}

/// RemoveDeadCPEMI - Remove a dead constant pool entry instruction.  Update
/// sizes and offsets of impacted basic blocks.
void ARMConstantIslands::RemoveDeadCPEMI(MachineInstr *CPEMI) {
  MachineBasicBlock *CPEBB = CPEMI->getParent();
  unsigned Size = CPEMI->getOperand(2).getImm();
  CPEMI->eraseFromParent();
  BBInfo[CPEBB->getNumber()].Size -= Size;
  // All succeeding offsets have the current size value added in, fix this.
  if (CPEBB->empty()) {
    // In Thumb1 mode, the island's size may have been padded by two to
    // satisfy the alignment requirement, in which case Size is now 2 for
    // the empty block; fix this.
1391 // All succeeding offsets have the current size value added in, fix this. 1392 if (BBInfo[CPEBB->getNumber()].Size != 0) { 1393 Size += BBInfo[CPEBB->getNumber()].Size; 1394 BBInfo[CPEBB->getNumber()].Size = 0; 1395 } 1396 1397 // This block no longer needs to be aligned. <rdar://problem/10534709>. 1398 CPEBB->setAlignment(0); 1399 } 1400 AdjustBBOffsetsAfter(CPEBB, -Size); 1401 // An island has only one predecessor BB and one successor BB. Check if 1402 // this BB's predecessor jumps directly to this BB's successor. This 1403 // shouldn't happen currently. 1404 assert(!BBIsJumpedOver(CPEBB) && "How did this happen?"); 1405 // FIXME: remove the empty blocks after all the work is done? 1406} 1407 1408/// RemoveUnusedCPEntries - Remove constant pool entries whose refcounts 1409/// are zero. 1410bool ARMConstantIslands::RemoveUnusedCPEntries() { 1411 unsigned MadeChange = false; 1412 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) { 1413 std::vector<CPEntry> &CPEs = CPEntries[i]; 1414 for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) { 1415 if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) { 1416 RemoveDeadCPEMI(CPEs[j].CPEMI); 1417 CPEs[j].CPEMI = NULL; 1418 MadeChange = true; 1419 } 1420 } 1421 } 1422 return MadeChange; 1423} 1424 1425/// BBIsInRange - Returns true if the distance between specific MI and 1426/// specific BB can fit in MI's displacement field. 1427bool ARMConstantIslands::BBIsInRange(MachineInstr *MI,MachineBasicBlock *DestBB, 1428 unsigned MaxDisp) { 1429 unsigned PCAdj = isThumb ? 4 : 8; 1430 unsigned BrOffset = GetOffsetOf(MI) + PCAdj; 1431 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset; 1432 1433 DEBUG(errs() << "Branch of destination BB#" << DestBB->getNumber() 1434 << " from BB#" << MI->getParent()->getNumber() 1435 << " max delta=" << MaxDisp 1436 << " from " << GetOffsetOf(MI) << " to " << DestOffset 1437 << " offset " << int(DestOffset-BrOffset) << "\t" << *MI); 1438 1439 if (BrOffset <= DestOffset) { 1440 // Branch before the Dest. 1441 if (DestOffset-BrOffset <= MaxDisp) 1442 return true; 1443 } else { 1444 if (BrOffset-DestOffset <= MaxDisp) 1445 return true; 1446 } 1447 return false; 1448} 1449 1450/// FixUpImmediateBr - Fix up an immediate branch whose destination is too far 1451/// away to fit in its displacement field. 1452bool ARMConstantIslands::FixUpImmediateBr(MachineFunction &MF, ImmBranch &Br) { 1453 MachineInstr *MI = Br.MI; 1454 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB(); 1455 1456 // Check to see if the DestBB is already in-range. 1457 if (BBIsInRange(MI, DestBB, Br.MaxDisp)) 1458 return false; 1459 1460 if (!Br.isCond) 1461 return FixUpUnconditionalBr(MF, Br); 1462 return FixUpConditionalBr(MF, Br); 1463} 1464 1465/// FixUpUnconditionalBr - Fix up an unconditional branch whose destination is 1466/// too far away to fit in its displacement field. If the LR register has been 1467/// spilled in the epilogue, then we can use BL to implement a far jump. 1468/// Otherwise, add an intermediate branch instruction to a branch. 1469bool 1470ARMConstantIslands::FixUpUnconditionalBr(MachineFunction &MF, ImmBranch &Br) { 1471 MachineInstr *MI = Br.MI; 1472 MachineBasicBlock *MBB = MI->getParent(); 1473 if (!isThumb1) 1474 llvm_unreachable("FixUpUnconditionalBr is Thumb1 only!"); 1475 1476 // Use BL to implement far jump. 
  Br.MaxDisp = (1 << 21) * 2;
  MI->setDesc(TII->get(ARM::tBfar));
  BBInfo[MBB->getNumber()].Size += 2;
  AdjustBBOffsetsAfter(MBB, 2);
  HasFarJump = true;
  ++NumUBrFixed;

  DEBUG(errs() << "  Changed B to long jump " << *MI);

  return true;
}

/// FixUpConditionalBr - Fix up a conditional branch whose destination is too
/// far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
bool
ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();

  // Add an unconditional branch to the destination and invert the branch
  // condition to jump over it:
  // blt L1
  // =>
  // bge L2
  // b   L1
  // L2:
  ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
  CC = ARMCC::getOppositeCondition(CC);
  unsigned CCReg = MI->getOperand(2).getReg();

  // If the branch is at the end of its MBB and that has a fall-through block,
  // direct the updated conditional branch to the fall-through block.
  // Otherwise, split the MBB before the next instruction.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *BMI = &MBB->back();
  bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);

  ++NumCBrFixed;
  if (BMI != MI) {
    if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
        BMI->getOpcode() == Br.UncondBr) {
      // Last MI in the BB is an unconditional branch. Can we simply invert the
      // condition and swap destinations:
      // beq L1
      // b   L2
      // =>
      // bne L2
      // b   L1
      MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
      if (BBIsInRange(MI, NewDest, Br.MaxDisp)) {
        DEBUG(errs() << "  Invert Bcc condition and swap its destination with "
                     << *BMI);
        BMI->getOperand(0).setMBB(DestBB);
        MI->getOperand(0).setMBB(NewDest);
        MI->getOperand(1).setImm(CC);
        return true;
      }
    }
  }

  if (NeedSplit) {
    SplitBlockBeforeInstr(MI);
    // No need for the branch to the next block. We're adding an unconditional
    // branch to the destination.
    int delta = TII->GetInstSizeInBytes(&MBB->back());
    BBInfo[MBB->getNumber()].Size -= delta;
    MachineBasicBlock* SplitBB = llvm::next(MachineFunction::iterator(MBB));
    AdjustBBOffsetsAfter(SplitBB, -delta);
    MBB->back().eraseFromParent();
    // BBInfo[SplitBB].Offset is wrong temporarily; it is fixed below.
  }
  MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));

  DEBUG(errs() << "  Insert B to BB#" << DestBB->getNumber()
               << " also invert condition and change dest. to BB#"
               << NextBB->getNumber() << "\n");

  // Insert a new conditional branch and a new unconditional branch.
  // Also update the ImmBranch and add a new entry for the new branch.
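  // The rebuilt conditional branch targets NextBB (the fall-through or the
  // block just split off); the new unconditional branch reaches DestBB and
  // is registered in ImmBranches so it can itself be fixed up if needed.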
  BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
    .addMBB(NextBB).addImm(CC).addReg(CCReg);
  Br.MI = &MBB->back();
  BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
  if (isThumb)
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB)
      .addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
  BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
  unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
  ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));

  // Remove the old conditional branch. It may or may not still be in MBB.
  BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
  MI->eraseFromParent();

  // The net size change is an addition of one unconditional branch.
  int delta = TII->GetInstSizeInBytes(&MBB->back());
  AdjustBBOffsetsAfter(MBB, delta);
  return true;
}

/// UndoLRSpillRestore - Remove Thumb push / pop instructions that only spill
/// LR / restore LR to pc. FIXME: This is done here because it's only possible
/// to do this if tBfar is not used.
bool ARMConstantIslands::UndoLRSpillRestore() {
  bool MadeChange = false;
  for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
    MachineInstr *MI = PushPopMIs[i];
    // First two operands are predicates.
    if (MI->getOpcode() == ARM::tPOP_RET &&
        MI->getOperand(2).getReg() == ARM::PC &&
        MI->getNumExplicitOperands() == 3) {
      // Create the new insn and copy the predicate from the old.
      BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
        .addOperand(MI->getOperand(0))
        .addOperand(MI->getOperand(1));
      MI->eraseFromParent();
      MadeChange = true;
    }
  }
  return MadeChange;
}

bool ARMConstantIslands::OptimizeThumb2Instructions(MachineFunction &MF) {
  bool MadeChange = false;

  // Shrink ADR and LDR from constantpool.
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned Opcode = U.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1;
    unsigned Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2LEApcrel:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLEApcrel;
        Bits = 8;
        Scale = 4;
      }
      break;
    case ARM::t2LDRpci:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLDRpci;
        Bits = 8;
        Scale = 4;
      }
      break;
    }

    if (!NewOpc)
      continue;

    unsigned UserOffset = GetOffsetOf(U.MI) + 4;
    unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
    // FIXME: Check if offset is multiple of scale if scale is not 4.
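    // The Thumb1 forms take an unsigned 8-bit immediate scaled by 4, so the
    // constant pool entry must lie at most 1020 bytes past the (word-aligned)
    // PC for the 16-bit encoding to be usable.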
    if (CPEIsInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
      U.MI->setDesc(TII->get(NewOpc));
      MachineBasicBlock *MBB = U.MI->getParent();
      BBInfo[MBB->getNumber()].Size -= 2;
      AdjustBBOffsetsAfter(MBB, -2);
      ++NumT2CPShrunk;
      MadeChange = true;
    }
  }

  MadeChange |= OptimizeThumb2Branches(MF);
  MadeChange |= OptimizeThumb2JumpTables(MF);
  return MadeChange;
}

bool ARMConstantIslands::OptimizeThumb2Branches(MachineFunction &MF) {
  bool MadeChange = false;

  for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i) {
    ImmBranch &Br = ImmBranches[i];
    unsigned Opcode = Br.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1;
    unsigned Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2B:
      NewOpc = ARM::tB;
      Bits = 11;
      Scale = 2;
      break;
    case ARM::t2Bcc: {
      NewOpc = ARM::tBcc;
      Bits = 8;
      Scale = 2;
      break;
    }
    }
    if (NewOpc) {
      unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
      MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
      if (BBIsInRange(Br.MI, DestBB, MaxOffs)) {
        Br.MI->setDesc(TII->get(NewOpc));
        MachineBasicBlock *MBB = Br.MI->getParent();
        BBInfo[MBB->getNumber()].Size -= 2;
        AdjustBBOffsetsAfter(MBB, -2);
        ++NumT2BrShrunk;
        MadeChange = true;
      }
    }

    Opcode = Br.MI->getOpcode();
    if (Opcode != ARM::tBcc)
      continue;

    NewOpc = 0;
    unsigned PredReg = 0;
    ARMCC::CondCodes Pred = llvm::getInstrPredicate(Br.MI, PredReg);
    if (Pred == ARMCC::EQ)
      NewOpc = ARM::tCBZ;
    else if (Pred == ARMCC::NE)
      NewOpc = ARM::tCBNZ;
    if (!NewOpc)
      continue;
    MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
    // Check if the distance is within 126 bytes (the CBZ / CBNZ range).
    // Reduce the starting offset by 2 because the cmp will be eliminated.
    unsigned BrOffset = GetOffsetOf(Br.MI) + 4 - 2;
    unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
    if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
      MachineBasicBlock::iterator CmpMI = Br.MI;
      if (CmpMI != Br.MI->getParent()->begin()) {
        --CmpMI;
        if (CmpMI->getOpcode() == ARM::tCMPi8) {
          unsigned Reg = CmpMI->getOperand(0).getReg();
          Pred = llvm::getInstrPredicate(CmpMI, PredReg);
          if (Pred == ARMCC::AL &&
              CmpMI->getOperand(1).getImm() == 0 &&
              isARMLowRegister(Reg)) {
            MachineBasicBlock *MBB = Br.MI->getParent();
            MachineInstr *NewBR =
              BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
                .addReg(Reg)
                .addMBB(DestBB, Br.MI->getOperand(0).getTargetFlags());
            CmpMI->eraseFromParent();
            Br.MI->eraseFromParent();
            Br.MI = NewBR;
            BBInfo[MBB->getNumber()].Size -= 2;
            AdjustBBOffsetsAfter(MBB, -2);
            ++NumCBZ;
            MadeChange = true;
          }
        }
      }
    }
  }

  return MadeChange;
}

/// OptimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
/// jumptables when possible.
bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
  bool MadeChange = false;

  // FIXME: After the tables are shrunk, can we get rid of some of the
  // constantpool tables?
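  // TB[BH] branches to pc + 2 * entry, where the table holds unsigned byte
  // or halfword entries, so every target must lie forward of the table
  // branch and within 510 bytes (TBB) or 131070 bytes (TBH) of it.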
  MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  if (MJTI == 0) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    bool ByteOk = true;
    bool HalfWordOk = true;
    unsigned JTOffset = GetOffsetOf(MI) + 4;
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
      // Negative offset is not ok. FIXME: We should change BB layout to make
      // sure all the branches are forward.
      if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
        ByteOk = false;
      unsigned TBHLimit = ((1<<16)-1)*2;
      if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
        HalfWordOk = false;
      if (!ByteOk && !HalfWordOk)
        break;
    }

    if (ByteOk || HalfWordOk) {
      MachineBasicBlock *MBB = MI->getParent();
      unsigned BaseReg = MI->getOperand(0).getReg();
      bool BaseRegKill = MI->getOperand(0).isKill();
      if (!BaseRegKill)
        continue;
      unsigned IdxReg = MI->getOperand(1).getReg();
      bool IdxRegKill = MI->getOperand(1).isKill();

      // Scan backwards to find the instruction that defines the base
      // register. Due to post-RA scheduling, we can't count on it
      // immediately preceding the branch instruction.
      MachineBasicBlock::iterator PrevI = MI;
      MachineBasicBlock::iterator B = MBB->begin();
      while (PrevI != B && !PrevI->definesRegister(BaseReg))
        --PrevI;

      // If for some reason we didn't find it, we can't do anything, so
      // just skip this one.
      if (!PrevI->definesRegister(BaseReg))
        continue;

      MachineInstr *AddrMI = PrevI;
      bool OptOk = true;
      // Examine the instruction that calculates the jumptable entry address.
      // Make sure it only defines the base register and kills any uses
      // other than the index register.
      for (unsigned k = 0, eee = AddrMI->getNumOperands(); k != eee; ++k) {
        const MachineOperand &MO = AddrMI->getOperand(k);
        if (!MO.isReg() || !MO.getReg())
          continue;
        if (MO.isDef() && MO.getReg() != BaseReg) {
          OptOk = false;
          break;
        }
        if (MO.isUse() && !MO.isKill() && MO.getReg() != IdxReg) {
          OptOk = false;
          break;
        }
      }
      if (!OptOk)
        continue;

      // Now scan back again to find the tLEApcrelJT or t2LEApcrelJT
      // instruction that gave us the initial base register definition.
      for (--PrevI; PrevI != B && !PrevI->definesRegister(BaseReg); --PrevI)
        ;

      // The instruction should be a tLEApcrelJT or t2LEApcrelJT; we want
      // to delete it as well.
      MachineInstr *LeaMI = PrevI;
      if ((LeaMI->getOpcode() != ARM::tLEApcrelJT &&
           LeaMI->getOpcode() != ARM::t2LEApcrelJT) ||
          LeaMI->getOperand(0).getReg() != BaseReg)
        OptOk = false;

      if (!OptOk)
        continue;

      unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
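      // Replace the three-instruction sequence (the LEA of the table base,
      // the entry-address computation, and the indirect branch) with a
      // single table-branch instruction that indexes the jump table directly.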
      MachineInstr *NewJTMI = BuildMI(MBB, MI->getDebugLoc(), TII->get(Opc))
        .addReg(IdxReg, getKillRegState(IdxRegKill))
        .addJumpTableIndex(JTI, JTOP.getTargetFlags())
        .addImm(MI->getOperand(JTOpIdx+1).getImm());
      // FIXME: Insert an "ALIGN" instruction to ensure the next instruction
      // is 2-byte aligned. For now, asm printer will fix it up.
      unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
      unsigned OrigSize = TII->GetInstSizeInBytes(AddrMI);
      OrigSize += TII->GetInstSizeInBytes(LeaMI);
      OrigSize += TII->GetInstSizeInBytes(MI);

      AddrMI->eraseFromParent();
      LeaMI->eraseFromParent();
      MI->eraseFromParent();

      int delta = OrigSize - NewSize;
      BBInfo[MBB->getNumber()].Size -= delta;
      AdjustBBOffsetsAfter(MBB, -delta);

      ++NumTBs;
      MadeChange = true;
    }
  }

  return MadeChange;
}

/// ReorderThumb2JumpTables - Adjust the function's block layout to ensure that
/// jump tables always branch forwards, since that's what tbb and tbh need.
bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
  bool MadeChange = false;

  MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  if (MJTI == 0) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    // We prefer the target blocks of a jump table to come after the jump
    // instruction so we can use TB[BH]. Loop through the target blocks and
    // try to adjust them so that holds.
    int JTNumber = MI->getParent()->getNumber();
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      int DTNumber = MBB->getNumber();

      if (DTNumber < JTNumber) {
        // The destination precedes the switch. Try to move the block forward
        // so we have a positive offset.
        MachineBasicBlock *NewBB =
          AdjustJTTargetBlockForward(MBB, MI->getParent());
        if (NewBB)
          MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

MachineBasicBlock *ARMConstantIslands::
AdjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB)
{
  MachineFunction &MF = *BB->getParent();

  // If the destination block is terminated by an unconditional branch,
  // try to move it; otherwise, create a new block following the jump
  // table that branches back to the actual target. This is a very simple
  // heuristic. FIXME: We can definitely improve it.
  MachineBasicBlock *TBB = 0, *FBB = 0;
  SmallVector<MachineOperand, 4> Cond;
  SmallVector<MachineOperand, 4> CondPrior;
  MachineFunction::iterator BBi = BB;
  MachineFunction::iterator OldPrior = prior(BBi);

  // If the block terminator isn't analyzable, don't try to move the block.
  bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);
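  // AnalyzeBranch returns true when it cannot decode the terminator; on
  // success, an empty Cond means the block either falls through or ends in
  // an unconditional branch.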
  // If the block ends in an unconditional branch, move it. The prior block
  // has to have an analyzable terminator for us to move this one. Be paranoid
  // and make sure we're not trying to move the entry block of the function.
  if (!B && Cond.empty() && BB != MF.begin() &&
      !TII->AnalyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
    BB->moveAfter(JTBB);
    OldPrior->updateTerminator();
    BB->updateTerminator();
    // Update numbering to account for the block being moved.
    MF.RenumberBlocks();
    ++NumJTMoved;
    return NULL;
  }

  // Create a new MBB for the code after the jump BB.
  MachineBasicBlock *NewBB =
    MF.CreateMachineBasicBlock(JTBB->getBasicBlock());
  MachineFunction::iterator MBBI = JTBB; ++MBBI;
  MF.insert(MBBI, NewBB);

  // Add an unconditional branch from NewBB to BB.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond directly to anything in the source.
  assert(isThumb2 && "Adjusting for TB[BH] but not in Thumb2?");
  BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B)).addMBB(BB)
    .addImm(ARMCC::AL).addReg(0);

  // Update internal data structures to account for the newly inserted MBB.
  MF.RenumberBlocks(NewBB);

  // Update the CFG.
  NewBB->addSuccessor(BB);
  JTBB->removeSuccessor(BB);
  JTBB->addSuccessor(NewBB);

  ++NumJTInserted;
  return NewBB;
}