ARMBaseInstrInfo.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFeatures.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"

using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

static cl::opt<bool>
WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
           cl::desc("Widen ARM vmovs to vmovd when possible"));

static cl::opt<unsigned>
SwiftPartialUpdateClearance("swift-partial-update-clearance",
                            cl::Hidden, cl::init(12),
                            cl::desc("Clearance before partial register updates"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  uint16_t MLxOpc;     // MLA / MLS opcode
  uint16_t MulOpc;     // Expanded multiplication opcode
  uint16_t AddSubOpc;  // Expanded add / sub opcode
  bool NegAcc;         // True if the acc is negated before the add / sub.
  bool HasLane;        // True if instruction has an extra "lane" operand.
};

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,        MulOpc,        AddSubOpc,   NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,     ARM::VMULS,    ARM::VADDS,  false,  false },
  { ARM::VMLSS,     ARM::VMULS,    ARM::VSUBS,  false,  false },
  { ARM::VMLAD,     ARM::VMULD,    ARM::VADDD,  false,  false },
  { ARM::VMLSD,     ARM::VMULD,    ARM::VSUBD,  false,  false },
  { ARM::VNMLAS,    ARM::VNMULS,   ARM::VSUBS,  true,   false },
  { ARM::VNMLSS,    ARM::VMULS,    ARM::VSUBS,  true,   false },
  { ARM::VNMLAD,    ARM::VNMULD,   ARM::VSUBD,  true,   false },
  { ARM::VNMLSD,    ARM::VMULD,    ARM::VSUBD,  true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,    ARM::VMULfd,   ARM::VADDfd, false,  false },
  { ARM::VMLSfd,    ARM::VMULfd,   ARM::VSUBfd, false,  false },
  { ARM::VMLAfq,    ARM::VMULfq,   ARM::VADDfq, false,  false },
  { ARM::VMLSfq,    ARM::VMULfq,   ARM::VSUBfq, false,  false },
  { ARM::VMLAslfd,  ARM::VMULslfd, ARM::VADDfd, false,  true  },
  { ARM::VMLSslfd,  ARM::VMULslfd, ARM::VSUBfd, false,  true  },
  { ARM::VMLAslfq,  ARM::VMULslfq, ARM::VADDfq, false,  true  },
  { ARM::VMLSslfq,  ARM::VMULslfq, ARM::VSUBfq, false,  true  },
};
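// Illustrative note (not from the original source): each entry in this table
// drives the MLx expansion. For example, under the VMLAS entry a fused
//   vmla.f32 s0, s1, s2    (s0 += s1 * s2)
// may be split into the VMULS / VADDS pair
//   vmul.f32 s3, s1, s2
//   vadd.f32 s0, s0, s3
// and for VNMLAS the NegAcc flag records that the accumulator is negated
// before the subtract. The scratch register s3 above is hypothetical.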

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      assert(false && "Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II = TM->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfo::CreateTargetHazardRecognizer(TM, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2())
    return (ScheduleHazardRecognizer *)new ARMHazardRecognizer(II, DAG);
  return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  uint64_t TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI->mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}
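// Illustrative note (not from the original source): for a pre-indexed load
// such as
//   ldr r0, [r1, #4]!
// the split produced above is
//   add r1, r1, #4
//   ldr r0, [r1]
// while the post-indexed form "ldr r0, [r1], #4" becomes the load followed
// by the add. The concrete registers shown are hypothetical.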

// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  TBB = 0;
  FBB = 0;

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false; // Empty blocks are easy.
  --I;

  // Walk backwards from the end of the basic block until the branch is
  // analyzed or we give up.
  while (isPredicated(I) || I->isTerminator() || I->isDebugValue()) {

    // Flag to be raised on unanalyzeable instructions. This is useful in cases
    // where we want to clean up on the end of the basic block before we bail
    // out.
    bool CantAnalyze = false;

    // Skip over DEBUG values and predicated nonterminators.
    while (I->isDebugValue() || !I->isTerminator()) {
      if (I == MBB.begin())
        return false;
      --I;
    }

    if (isIndirectBranchOpcode(I->getOpcode()) ||
        isJumpTableBranchOpcode(I->getOpcode())) {
      // Indirect branches and jump tables can't be analyzed, but we still want
      // to clean up any instructions at the tail of the basic block.
      CantAnalyze = true;
    } else if (isUncondBranchOpcode(I->getOpcode())) {
      TBB = I->getOperand(0).getMBB();
    } else if (isCondBranchOpcode(I->getOpcode())) {
      // Bail out if we encounter multiple conditional branches.
      if (!Cond.empty())
        return true;

      assert(!FBB && "FBB should have been null.");
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(I->getOperand(1));
      Cond.push_back(I->getOperand(2));
    } else if (I->isReturn()) {
      // Returns can't be analyzed, but we should run cleanup.
      CantAnalyze = !isPredicated(I);
    } else {
      // We encountered an unrecognized terminator. Bail out immediately.
      return true;
    }

    // Cleanup code - to be run for unpredicated unconditional branches and
    // returns.
    if (!isPredicated(I) &&
        (isUncondBranchOpcode(I->getOpcode()) ||
         isIndirectBranchOpcode(I->getOpcode()) ||
         isJumpTableBranchOpcode(I->getOpcode()) ||
         I->isReturn())) {
      // Forget any previous conditional branch information - it no longer
      // applies.
      Cond.clear();
      FBB = 0;

      // If we can modify the function, delete everything below this
      // unconditional branch.
      if (AllowModify) {
        MachineBasicBlock::iterator DI = std::next(I);
        while (DI != MBB.end()) {
          MachineInstr *InstToDelete = DI;
          ++DI;
          InstToDelete->eraseFromParent();
        }
      }
    }

    if (CantAnalyze)
      return true;

    if (I == MBB.begin())
      return false;

    --I;
  }

  // We made it past the terminators without bailing out - we must have
  // analyzed this branch successfully.
  return false;
}
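// Illustrative note (not from the original source): for a block ending in
//   beq LBB1
//   b   LBB2
// a successful analysis leaves TBB = LBB1, FBB = LBB2, and Cond holding the
// condition-code immediate (ARMCC::EQ) plus the CPSR register operand.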


unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0);
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}
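// Illustrative note (not from the original source): reinserting the branch
// analyzed above, InsertBranch(MBB, LBB1, LBB2, {ARMCC::EQ, CPSR}) emits
//   beq LBB1
//   b   LBB2
// and returns 2, the number of instructions added.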

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
  if (MI->isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MachineInstrBuilder(*MI->getParent()->getParent(), MI)
      .addImm(Pred[0].getImm())
      .addReg(Pred[1].getReg());
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}
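// Illustrative note (not from the original source): "subsumes" means that
// whenever Pred2 holds, Pred1 holds as well. For example HS (unsigned >=)
// subsumes HI (unsigned >): any value that is strictly higher is also
// higher-or-same, so an HS-predicated instruction can stand in for an
// HI-predicated one. AL (always) subsumes every predicate.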

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                        std::vector<MachineOperand> &Pred) const {
  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
        (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  if (!MI->isPredicable())
    return false;

  ARMFunctionInfo *AFI =
    MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();

  if (AFI->isThumb2Function()) {
    if (getSubtarget().restrictIT())
      return isV8EligibleForIT(MI);
  } else { // non-Thumb
    if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
      return false;
  }

  return true;
}

template<> bool IsCPSRDead<MachineInstr>(MachineInstr* MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    if (!MO.isDead())
      return false;
  }
  // all definitions of CPSR are dead
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  // If this machine instr is an inline asm, measure it.
  if (MI->getOpcode() == ARM::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default:
    // pseudo-instruction sizes are zero.
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI->getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::BR_JTr:
  case ARM::BR_JTm:
  case ARM::BR_JTadd:
  case ARM::tBR_JTr:
  case ARM::t2BR_JT:
  case ARM::t2TBB_JT:
  case ARM::t2TBH_JT: {
    // These are jumptable branches, i.e. a branch followed by an inlined
    // jumptable. The size is 4 + 4 * number of entries. For TBB, each
    // entry is one byte; TBH entries are two bytes each.
    unsigned EntrySize = (Opc == ARM::t2TBB_JT)
      ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
    unsigned NumOps = MCID.getNumOperands();
    MachineOperand JTOP =
      MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
    unsigned JTI = JTOP.getIndex();
    const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
    assert(MJTI != 0);
    const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
    assert(JTI < JT.size());
    // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
    // aligned. The assembler / linker may add 2 byte padding just before
    // the JT entries. The size does not include this padding; the
    // constant islands pass does separate bookkeeping for it.
    // FIXME: If we know the size of the function is less than (1 << 16) * 2
    // bytes, we can use 16-bit entries instead. Then there won't be an
    // alignment issue.
    unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
    unsigned NumEntries = getNumJTEntries(JT, JTI);
    if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
      // Make sure the instruction that follows TBB is 2-byte aligned.
      // FIXME: Constant island pass should insert an "ALIGN" instruction
      // instead.
      ++NumEntries;
    return NumEntries * EntrySize + InstSize;
  }
  }
}
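// Illustrative arithmetic (not from the original source): a t2TBB_JT with
// five table entries has EntrySize = 1 and InstSize = 4; the odd entry
// count is rounded up to six to keep the following instruction 2-byte
// aligned, so the reported size is 6 * 1 + 4 = 10 bytes.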

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI;
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += GetInstSizeInBytes(&*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                  .addReg(SrcReg, getKillRegState(KillSrc))));
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    AddDefaultPred(MIB);
    return;
  }

  // Handle register classes that require multiple instructions.
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;
  int Spacing = 1;

  // Use VORRq when possible.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VORRq;
    BeginIdx = ARM::qsub_0;
    SubRegs = 2;
  } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VORRq;
    BeginIdx = ARM::qsub_0;
    SubRegs = 4;
  // Fall back to VMOVD.
  } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
  } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
  } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
  } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
    BeginIdx = ARM::gsub_0;
    SubRegs = 2;
  } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
    Spacing = 2;
  } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
    Spacing = 2;
  } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
    Spacing = 2;
  }

  assert(Opc && "Impossible reg-to-reg copy");

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstrBuilder Mov;

  // Copy register tuples backward when the first Dest reg overlaps with
  // SrcReg.
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
    Spacing = -Spacing;
  }
#ifndef NDEBUG
  SmallSet<unsigned, 4> DstRegs;
#endif
  for (unsigned i = 0; i != SubRegs; ++i) {
    unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
    unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
    assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
    assert(!DstRegs.count(Src) && "destructive vector copy");
    DstRegs.insert(Dst);
#endif
    Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
    // VORR takes two source operands.
    if (Opc == ARM::VORRq)
      Mov.addReg(Src);
    Mov = AddDefaultPred(Mov);
    // MOVr can set CC.
    if (Opc == ARM::MOVr)
      Mov = AddDefaultCC(Mov);
  }
  // Add implicit super-register defs and kills to the last instruction.
  Mov->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    Mov->addRegisterKilled(SrcReg, TRI);
}
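// Illustrative note (not from the original source): copying a QQPR tuple
// such as q0_q1 -> q2_q3 expands into two predicated VORRq instructions,
// one per Q sub-register. When source and destination overlap, e.g.
// q0_q1 -> q1_q2, the loop above runs backward so no sub-register is
// clobbered before it has been read.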

const MachineInstrBuilder &
ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
                          unsigned SubIdx, unsigned State,
                          const TargetRegisterInfo *TRI) const {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOStore,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      if (Subtarget.hasV5TEOps()) {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::STRD));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
        MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO);

        AddDefaultPred(MIB);
      } else {
        // Fallback to STM instruction, which has existed since the dawn of
        // time.
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STMIA))
                         .addFrameIndex(FI).addMemOperand(MMO));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64TPseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
      AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}
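// Illustrative note (not from the original source): spilling a GPRPair such
// as r0_r1 uses a single predicated STRD of r0 and r1 on v5TE-capable
// targets, while older subtargets fall back to an STMIA of the two halves;
// the concrete registers named here are hypothetical.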

unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64:
  case ARM::VST1d64TPseudo:
  case ARM::VST1d64QPseudo:
    if (MI->getOperand(0).isFI() &&
        MI->getOperand(2).getSubReg() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
}
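// Illustrative note (not from the original source): an STRi12 of the form
//   STRi12 %r0, <fi#2>, 0, pred:14, pred:%noreg
// is recognized as a stack-slot store, returning r0 and setting FrameIndex
// to 2; a nonzero immediate offset defeats the match, since the store then
// no longer covers the exact slot.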

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOLoad,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));

    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB;

      if (Subtarget.hasV5TEOps()) {
        MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
        AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
        MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO);

        AddDefaultPred(MIB);
      } else {
        // Fallback to LDM instruction, which has existed since the dawn of
        // time.
        MIB = AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDMIA))
                             .addFrameIndex(FI).addMemOperand(MMO));
        MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
      }

      if (TargetRegisterInfo::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                         .addFrameIndex(FI)
                         .addMemOperand(MMO));
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        if (TargetRegisterInfo::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
        if (TargetRegisterInfo::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
      if (TargetRegisterInfo::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown regclass!");
  }
}

unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d64QPseudo:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}

bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
  if (!WidenVMOVS || !MI->isCopy() || Subtarget.isCortexA15())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  unsigned DstRegS = MI->getOperand(0).getReg();
  unsigned SrcRegS = MI->getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI->getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  DEBUG(dbgs() << "widening: " << *MI);
  MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);

  // Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
  int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD);
  if (ImpDefIdx != -1)
    MI->RemoveOperand(ImpDefIdx);

  // Change the opcode and operands.
  MI->setDesc(get(ARM::VMOVD));
  MI->getOperand(0).setReg(DstRegD);
  MI->getOperand(1).setReg(SrcRegD);
  AddDefaultPred(MIB);

  // We are now reading SrcRegD instead of SrcRegS. This may upset the
  // register scavenger and machine verifier, so we need to indicate that we
  // are reading an undefined value from SrcRegD, but a proper value from
  // SrcRegS.
  MI->getOperand(1).setIsUndef();
  MIB.addReg(SrcRegS, RegState::Implicit);

  // SrcRegD may actually contain an unrelated value in the ssub_1
  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
  if (MI->getOperand(1).isKill()) {
    MI->getOperand(1).setIsKill(false);
    MI->addRegisterKilled(SrcRegS, TRI, true);
  }

  DEBUG(dbgs() << "replaced by: " << *MI);
  return true;
}
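// Illustrative note (not from the original source): a copy such as
//   s0 = COPY s2
// is widened above to
//   d0 = VMOVD d1 (d1 marked undef, plus an implicit use of s2)
// so the verifier still sees a well-defined read of the live S-register,
// while the whole-D-register move can later become a NEON VORR.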

/// Create a copy of a const pool value. Update CPI to the new index and
/// return the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = 0;

  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
  if (ACPV->isGlobalValue())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
             ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = ARMConstantPoolSymbol::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
             ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId,
                                             ARMCP::CPLSDA, 4);
  else if (ACPV->isMachineBasicBlock())
    NewCPV = ARMConstantPoolMBB::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}

void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }
}

MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfo::duplicate(Orig, MF);
  switch(Orig->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
    break;
  }
  }
  return MI;
}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1,
                                        const MachineRegisterInfo *MRI) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::LDRLIT_ga_pcrel ||
      Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
      Opcode == ARM::tLDRLIT_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::LDRLIT_ga_pcrel ||
        Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
        Opcode == ARM::tLDRLIT_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
        static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
        static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    unsigned Addr0 = MI0->getOperand(1).getReg();
    unsigned Addr1 = MI1->getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI ||
          !TargetRegisterInfo::isVirtualRegister(Addr0) ||
          !TargetRegisterInfo::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded values, e.g. a constantpool of a global address,
      // are the same.
      if (!produceSameValue(Def0, Def1, MRI))
        return false;
    }

    for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
      // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
      const MachineOperand &MO0 = MI0->getOperand(i);
      const MachineOperand &MO1 = MI1->getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
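// Illustrative note (not from the original source): produceSameValue lets
// two tLDRpci_pic loads be treated as producing the same value when their
// constant-pool entries hold equivalent ARMConstantPoolValues, even though
// each rematerialized copy carries a distinct PIC label id.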

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only
/// difference between the two addresses is the offset. It also returns the
/// offsets by reference.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1, int64_t Offset2,
                                               unsigned NumLoads) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  assert(Offset2 > Offset1);

  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  // Check if the machine opcodes are different. If they are different
  // then we consider them to not be of the same base address,
  // EXCEPT in the case of Thumb2 byte loads where one is LDRBi8 and the
  // other LDRBi12. In this case, they are considered to be the same because
  // they are different encoding forms of the same basic instruction.
  if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) &&
      !((Load1->getMachineOpcode() == ARM::t2LDRBi8 &&
         Load2->getMachineOpcode() == ARM::t2LDRBi12) ||
        (Load1->getMachineOpcode() == ARM::t2LDRBi12 &&
         Load2->getMachineOpcode() == ARM::t2LDRBi8)))
    return false; // FIXME: overly conservative?

  // Four loads in a row should be sufficient.
  if (NumLoads >= 3)
    return false;

  return true;
}
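// Illustrative arithmetic (not from the original source): with Offset1 = 0
// and Offset2 = 512 the test (512 - 0) / 8 > 64 is false, so the loads may
// still be paired; at Offset2 = 520 the quotient becomes 65 and the loads
// are considered too far apart to be worth scheduling together.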

bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI->isDebugValue())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any dbg_value instructions
  while (++I != MBB->end() && I->isDebugValue())
    ;
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  // Calls don't actually change the stack pointer, even if they have imp-defs.
  // No ARM calling conventions change the stack pointer. (X86 calling
  // conventions sometimes do).
  if (!MI->isCall() && MI->definesRegister(ARM::SP))
    return true;

  return false;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  if (!NumCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned UnpredCost = Probability.getNumerator() * NumCycles;
  UnpredCost /= Probability.getDenominator();
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (NumCycles + ExtraPredCycles) <= UnpredCost;
}
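// Illustrative arithmetic (not from the original source): with NumCycles = 4,
// ExtraPredCycles = 1, a branch probability of 1/2, and a misprediction
// penalty of 10 cycles, UnpredCost = 4 * 1/2 + 1 + 10/10 = 4; since
// 4 + 1 <= 4 does not hold, the single-block if-conversion is rejected.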
1602 unsigned TUnpredCost = Probability.getNumerator() * TCycles; 1603 TUnpredCost /= Probability.getDenominator(); 1604 1605 uint32_t Comp = Probability.getDenominator() - Probability.getNumerator(); 1606 unsigned FUnpredCost = Comp * FCycles; 1607 FUnpredCost /= Probability.getDenominator(); 1608 1609 unsigned UnpredCost = TUnpredCost + FUnpredCost; 1610 UnpredCost += 1; // The branch itself 1611 UnpredCost += Subtarget.getMispredictionPenalty() / 10; 1612 1613 return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost; 1614} 1615 1616bool 1617ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB, 1618 MachineBasicBlock &FMBB) const { 1619 // Reduce false anti-dependencies to let Swift's out-of-order execution 1620 // engine do its thing. 1621 return Subtarget.isSwift(); 1622} 1623 1624/// getInstrPredicate - If instruction is predicated, returns its predicate 1625/// condition, otherwise returns AL. It also returns the condition code 1626/// register by reference. 1627ARMCC::CondCodes 1628llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) { 1629 int PIdx = MI->findFirstPredOperandIdx(); 1630 if (PIdx == -1) { 1631 PredReg = 0; 1632 return ARMCC::AL; 1633 } 1634 1635 PredReg = MI->getOperand(PIdx+1).getReg(); 1636 return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm(); 1637} 1638 1639 1640int llvm::getMatchingCondBranchOpcode(int Opc) { 1641 if (Opc == ARM::B) 1642 return ARM::Bcc; 1643 if (Opc == ARM::tB) 1644 return ARM::tBcc; 1645 if (Opc == ARM::t2B) 1646 return ARM::t2Bcc; 1647 1648 llvm_unreachable("Unknown unconditional branch opcode!"); 1649} 1650 1651/// commuteInstruction - Handle commutable instructions. 1652MachineInstr * 1653ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { 1654 switch (MI->getOpcode()) { 1655 case ARM::MOVCCr: 1656 case ARM::t2MOVCCr: { 1657 // MOVCC can be commuted by inverting the condition. 1658 unsigned PredReg = 0; 1659 ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg); 1660 // MOVCC AL can't be inverted. Shouldn't happen. 1661 if (CC == ARMCC::AL || PredReg != ARM::CPSR) 1662 return NULL; 1663 MI = TargetInstrInfo::commuteInstruction(MI, NewMI); 1664 if (!MI) 1665 return NULL; 1666 // After swapping the MOVCC operands, also invert the condition. 1667 MI->getOperand(MI->findFirstPredOperandIdx()) 1668 .setImm(ARMCC::getOppositeCondition(CC)); 1669 return MI; 1670 } 1671 } 1672 return TargetInstrInfo::commuteInstruction(MI, NewMI); 1673} 1674 1675/// Identify instructions that can be folded into a MOVCC instruction, and 1676/// return the defining instruction. 1677static MachineInstr *canFoldIntoMOVCC(unsigned Reg, 1678 const MachineRegisterInfo &MRI, 1679 const TargetInstrInfo *TII) { 1680 if (!TargetRegisterInfo::isVirtualRegister(Reg)) 1681 return 0; 1682 if (!MRI.hasOneNonDBGUse(Reg)) 1683 return 0; 1684 MachineInstr *MI = MRI.getVRegDef(Reg); 1685 if (!MI) 1686 return 0; 1687 // MI is folded into the MOVCC by predicating it. 1688 if (!MI->isPredicable()) 1689 return 0; 1690 // Check if MI has any non-dead defs or physreg uses. This also detects 1691 // predicated instructions which will be reading CPSR. 1692 for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { 1693 const MachineOperand &MO = MI->getOperand(i); 1694 // Reject frame index operands, PEI can't handle the predicated pseudos. 1695 if (MO.isFI() || MO.isCPI() || MO.isJTI()) 1696 return 0; 1697 if (!MO.isReg()) 1698 continue; 1699 // MI can't have any tied operands, that would conflict with predication. 
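 // (A pre-existing tie would also clash with the def-to-false-operand tie
 // that optimizeSelect() adds to the predicated copy below.)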
1700 if (MO.isTied()) 1701 return 0; 1702 if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) 1703 return 0; 1704 if (MO.isDef() && !MO.isDead()) 1705 return 0; 1706 } 1707 bool DontMoveAcrossStores = true; 1708 if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ 0, DontMoveAcrossStores)) 1709 return 0; 1710 return MI; 1711} 1712 1713bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI, 1714 SmallVectorImpl<MachineOperand> &Cond, 1715 unsigned &TrueOp, unsigned &FalseOp, 1716 bool &Optimizable) const { 1717 assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) && 1718 "Unknown select instruction"); 1719 // MOVCC operands: 1720 // 0: Def. 1721 // 1: True use. 1722 // 2: False use. 1723 // 3: Condition code. 1724 // 4: CPSR use. 1725 TrueOp = 1; 1726 FalseOp = 2; 1727 Cond.push_back(MI->getOperand(3)); 1728 Cond.push_back(MI->getOperand(4)); 1729 // We can always fold a def. 1730 Optimizable = true; 1731 return false; 1732} 1733 1734MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI, 1735 bool PreferFalse) const { 1736 assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) && 1737 "Unknown select instruction"); 1738 MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo(); 1739 MachineInstr *DefMI = canFoldIntoMOVCC(MI->getOperand(2).getReg(), MRI, this); 1740 bool Invert = !DefMI; 1741 if (!DefMI) 1742 DefMI = canFoldIntoMOVCC(MI->getOperand(1).getReg(), MRI, this); 1743 if (!DefMI) 1744 return 0; 1745 1746 // Find new register class to use. 1747 MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1); 1748 unsigned DestReg = MI->getOperand(0).getReg(); 1749 const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg()); 1750 if (!MRI.constrainRegClass(DestReg, PreviousClass)) 1751 return 0; 1752 1753 // Create a new predicated version of DefMI. 1754 // Rfalse is the first use. 1755 MachineInstrBuilder NewMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), 1756 DefMI->getDesc(), DestReg); 1757 1758 // Copy all the DefMI operands, excluding its (null) predicate. 1759 const MCInstrDesc &DefDesc = DefMI->getDesc(); 1760 for (unsigned i = 1, e = DefDesc.getNumOperands(); 1761 i != e && !DefDesc.OpInfo[i].isPredicate(); ++i) 1762 NewMI.addOperand(DefMI->getOperand(i)); 1763 1764 unsigned CondCode = MI->getOperand(3).getImm(); 1765 if (Invert) 1766 NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode))); 1767 else 1768 NewMI.addImm(CondCode); 1769 NewMI.addOperand(MI->getOperand(4)); 1770 1771 // DefMI is not the -S version that sets CPSR, so add an optional %noreg. 1772 if (NewMI->hasOptionalDef()) 1773 AddDefaultCC(NewMI); 1774 1775 // The output register value when the predicate is false is an implicit 1776 // register operand tied to the first def. 1777 // The tie makes the register allocator ensure the FalseReg is allocated the 1778 // same register as operand 0. 1779 FalseReg.setImplicit(); 1780 NewMI.addOperand(FalseReg); 1781 NewMI->tieOperands(0, NewMI->getNumOperands() - 1); 1782 1783 // The caller will erase MI, but not DefMI. 1784 DefMI->eraseFromParent(); 1785 return NewMI; 1786} 1787 1788/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the 1789/// instruction is encoded with an 'S' bit is determined by the optional CPSR 1790/// def operand. 1791/// 1792/// This will go away once we can teach tblgen how to set the optional CPSR def 1793/// operand itself. 
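/// For example, convertAddSubFlagsOpcode(ARM::t2SUBSri) returns ARM::t2SUBri,
/// and opcodes with no entry in the map below return 0.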
1794struct AddSubFlagsOpcodePair {
1795 uint16_t PseudoOpc;
1796 uint16_t MachineOpc;
1797};
1798
1799static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
1800 {ARM::ADDSri, ARM::ADDri},
1801 {ARM::ADDSrr, ARM::ADDrr},
1802 {ARM::ADDSrsi, ARM::ADDrsi},
1803 {ARM::ADDSrsr, ARM::ADDrsr},
1804
1805 {ARM::SUBSri, ARM::SUBri},
1806 {ARM::SUBSrr, ARM::SUBrr},
1807 {ARM::SUBSrsi, ARM::SUBrsi},
1808 {ARM::SUBSrsr, ARM::SUBrsr},
1809
1810 {ARM::RSBSri, ARM::RSBri},
1811 {ARM::RSBSrsi, ARM::RSBrsi},
1812 {ARM::RSBSrsr, ARM::RSBrsr},
1813
1814 {ARM::t2ADDSri, ARM::t2ADDri},
1815 {ARM::t2ADDSrr, ARM::t2ADDrr},
1816 {ARM::t2ADDSrs, ARM::t2ADDrs},
1817
1818 {ARM::t2SUBSri, ARM::t2SUBri},
1819 {ARM::t2SUBSrr, ARM::t2SUBrr},
1820 {ARM::t2SUBSrs, ARM::t2SUBrs},
1821
1822 {ARM::t2RSBSri, ARM::t2RSBri},
1823 {ARM::t2RSBSrs, ARM::t2RSBrs},
1824};
1825
1826unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
1827 for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
1828 if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
1829 return AddSubFlagsOpcodeMap[i].MachineOpc;
1830 return 0;
1831}
1832
1833void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
1834 MachineBasicBlock::iterator &MBBI, DebugLoc dl,
1835 unsigned DestReg, unsigned BaseReg, int NumBytes,
1836 ARMCC::CondCodes Pred, unsigned PredReg,
1837 const ARMBaseInstrInfo &TII, unsigned MIFlags) {
1838 if (NumBytes == 0 && DestReg != BaseReg) {
1839 BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
1840 .addReg(BaseReg, RegState::Kill)
1841 .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
1842 .setMIFlags(MIFlags);
1843 return;
1844 }
1845
1846 bool isSub = NumBytes < 0;
1847 if (isSub) NumBytes = -NumBytes;
1848
1849 while (NumBytes) {
1850 unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
1851 unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
1852 assert(ThisVal && "Didn't extract field correctly");
1853
1854 // We will handle these bits from the offset; clear them.
1855 NumBytes &= ~ThisVal;
1856
1857 assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");
1858
1859 // Build the new ADD / SUB.
1860 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
1861 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
1862 .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
1863 .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
1864 .setMIFlags(MIFlags);
1865 BaseReg = DestReg;
1866 }
1867}
1868
1869static bool isAnySubRegLive(unsigned Reg, const TargetRegisterInfo *TRI,
1870 MachineInstr *MI) {
1871 for (MCSubRegIterator Subreg(Reg, TRI, /* IncludeSelf */ true);
1872 Subreg.isValid(); ++Subreg)
1873 if (MI->getParent()->computeRegisterLiveness(TRI, *Subreg, MI) !=
1874 MachineBasicBlock::LQR_Dead)
1875 return true;
1876 return false;
1877}
1878bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
1879 MachineFunction &MF, MachineInstr *MI,
1880 unsigned NumBytes) {
1881 // This optimisation potentially adds lots of load and store
1882 // micro-operations, so it is really only a benefit to code size.
1883 if (!Subtarget.isMinSize())
1884 return false;
1885
1886 // If only one register is pushed/popped, LLVM can use an LDR/STR
1887 // instead. We can't modify those so make sure we're dealing with an
1888 // instruction we understand.
1889 bool IsPop = isPopOpcode(MI->getOpcode());
1890 bool IsPush = isPushOpcode(MI->getOpcode());
1891 if (!IsPush && !IsPop)
1892 return false;
1893
1894 bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
1895 MI->getOpcode() == ARM::VLDMDIA_UPD;
1896 bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
1897 MI->getOpcode() == ARM::tPOP ||
1898 MI->getOpcode() == ARM::tPOP_RET;
1899
1900 assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
1901 MI->getOperand(1).getReg() == ARM::SP)) &&
1902 "trying to fold sp update into non-sp-updating push/pop");
1903
1904 // The VFP push & pop act on D-registers, so we can only fold in an
1905 // adjustment that is a multiple of 8 bytes. Similarly, GPR slots are 4 bytes
1906 // each. Don't try if this is violated.
1907 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
1908 return false;
1909
1910 // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
1911 // pred) so the list starts at 4. Thumb1 starts after the predicate.
1912 int RegListIdx = IsT1PushPop ? 2 : 4;
1913
1914 // Calculate the space we'll need in terms of registers.
1915 unsigned FirstReg = MI->getOperand(RegListIdx).getReg();
1916 unsigned RD0Reg, RegsNeeded;
1917 if (IsVFPPushPop) {
1918 RD0Reg = ARM::D0;
1919 RegsNeeded = NumBytes / 8;
1920 } else {
1921 RD0Reg = ARM::R0;
1922 RegsNeeded = NumBytes / 4;
1923 }
1924
1925 // We're going to have to strip all list operands off before
1926 // re-adding them since the order matters, so save the existing ones
1927 // for later.
1928 SmallVector<MachineOperand, 4> RegList;
1929 for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
1930 RegList.push_back(MI->getOperand(i));
1931
1932 const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
1933 const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
1934
1935 // Now try to find enough space in the reglist to allocate NumBytes.
1936 for (unsigned CurReg = FirstReg - 1; CurReg >= RD0Reg && RegsNeeded;
1937 --CurReg) {
1938 if (!IsPop) {
1939 // Pushing any register is completely harmless; mark the
1940 // register involved as undef since we don't care about it in
1941 // the slightest.
1942 RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
1943 false, false, true));
1944 --RegsNeeded;
1945 continue;
1946 }
1947
1948 // However, we can only pop an extra register if it's not live. For
1949 // registers live within the function we might clobber a return value
1950 // register; the other way a register can be live here is if it's
1951 // callee-saved.
1952 // TODO: Currently, computeRegisterLiveness() does not report "live" if only
1953 // a sub-register is live. When computeRegisterLiveness() handles
1954 // sub-registers, it can replace isAnySubRegLive().
1955 if (isCalleeSavedRegister(CurReg, CSRegs) ||
1956 isAnySubRegLive(CurReg, TRI, MI)) {
1957 // VFP pops don't allow holes in the register list, so any skip is fatal
1958 // for our transformation. GPR pops do, so we should just keep looking.
1959 if (IsVFPPushPop)
1960 return false;
1961 else
1962 continue;
1963 }
1964
1965 // Mark the unimportant registers as <def,dead> in the POP.
1966 RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
1967 true));
1968 --RegsNeeded;
1969 }
1970
1971 if (RegsNeeded > 0)
1972 return false;
1973
1974 // Finally we know we can profitably perform the optimisation so go
1975 // ahead: strip all existing registers off and add them back again
1976 // in the right order.
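 // Illustrative example (hypothetical registers): folding an 8-byte SP
 // decrement into 'push {r4, lr}' rewrites it as 'push {r2, r3, r4, lr}',
 // where r2 and r3 were appended above purely as undef scratch entries to
 // move SP, on the assumption that they are neither live nor callee-saved.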
1977 for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
1978 MI->RemoveOperand(i);
1979
1980 // Add the complete list back in.
1981 MachineInstrBuilder MIB(MF, &*MI);
1982 for (int i = RegList.size() - 1; i >= 0; --i)
1983 MIB.addOperand(RegList[i]);
1984
1985 return true;
1986}
1987
1988bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
1989 unsigned FrameReg, int &Offset,
1990 const ARMBaseInstrInfo &TII) {
1991 unsigned Opcode = MI.getOpcode();
1992 const MCInstrDesc &Desc = MI.getDesc();
1993 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1994 bool isSub = false;
1995
1996 // Memory operands in inline assembly always use AddrMode2.
1997 if (Opcode == ARM::INLINEASM)
1998 AddrMode = ARMII::AddrMode2;
1999
2000 if (Opcode == ARM::ADDri) {
2001 Offset += MI.getOperand(FrameRegIdx+1).getImm();
2002 if (Offset == 0) {
2003 // Turn it into a move.
2004 MI.setDesc(TII.get(ARM::MOVr));
2005 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2006 MI.RemoveOperand(FrameRegIdx+1);
2007 Offset = 0;
2008 return true;
2009 } else if (Offset < 0) {
2010 Offset = -Offset;
2011 isSub = true;
2012 MI.setDesc(TII.get(ARM::SUBri));
2013 }
2014
2015 // Common case: small offset, fits into instruction.
2016 if (ARM_AM::getSOImmVal(Offset) != -1) {
2017 // Replace the FrameIndex with sp / fp
2018 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2019 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
2020 Offset = 0;
2021 return true;
2022 }
2023
2024 // Otherwise, pull as much of the immediate into this ADDri/SUBri
2025 // as possible.
2026 unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
2027 unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
2028
2029 // We will handle these bits from the offset; clear them.
2030 Offset &= ~ThisImmVal;
2031
2032 // Get the properly encoded SOImmVal field.
2033 assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
2034 "Bit extraction didn't work?");
2035 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2036 } else {
2037 unsigned ImmIdx = 0;
2038 int InstrOffs = 0;
2039 unsigned NumBits = 0;
2040 unsigned Scale = 1;
2041 switch (AddrMode) {
2042 case ARMII::AddrMode_i12: {
2043 ImmIdx = FrameRegIdx + 1;
2044 InstrOffs = MI.getOperand(ImmIdx).getImm();
2045 NumBits = 12;
2046 break;
2047 }
2048 case ARMII::AddrMode2: {
2049 ImmIdx = FrameRegIdx+2;
2050 InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
2051 if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2052 InstrOffs *= -1;
2053 NumBits = 12;
2054 break;
2055 }
2056 case ARMII::AddrMode3: {
2057 ImmIdx = FrameRegIdx+2;
2058 InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
2059 if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2060 InstrOffs *= -1;
2061 NumBits = 8;
2062 break;
2063 }
2064 case ARMII::AddrMode4:
2065 case ARMII::AddrMode6:
2066 // Can't fold any offset even if it's zero.
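 // (AddrMode4 covers LDM/STM and AddrMode6 the NEON element/structure
 // accesses; neither encoding has an immediate offset field.)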
2067 return false;
2068 case ARMII::AddrMode5: {
2069 ImmIdx = FrameRegIdx+1;
2070 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
2071 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2072 InstrOffs *= -1;
2073 NumBits = 8;
2074 Scale = 4;
2075 break;
2076 }
2077 default:
2078 llvm_unreachable("Unsupported addressing mode!");
2079 }
2080
2081 Offset += InstrOffs * Scale;
2082 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
2083 if (Offset < 0) {
2084 Offset = -Offset;
2085 isSub = true;
2086 }
2087
2088 // Attempt to fold the address computation if the opcode has offset bits.
2089 if (NumBits > 0) {
2090 // Common case: small offset, fits into instruction.
2091 MachineOperand &ImmOp = MI.getOperand(ImmIdx);
2092 int ImmedOffset = Offset / Scale;
2093 unsigned Mask = (1 << NumBits) - 1;
2094 if ((unsigned)Offset <= Mask * Scale) {
2095 // Replace the FrameIndex with sp
2096 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2097 // FIXME: When addrmode2 goes away, this will simplify (like the
2098 // T2 version), as the LDR.i12 versions don't need the encoding
2099 // tricks for the offset value.
2100 if (isSub) {
2101 if (AddrMode == ARMII::AddrMode_i12)
2102 ImmedOffset = -ImmedOffset;
2103 else
2104 ImmedOffset |= 1 << NumBits;
2105 }
2106 ImmOp.ChangeToImmediate(ImmedOffset);
2107 Offset = 0;
2108 return true;
2109 }
2110
2111 // Otherwise, it didn't fit. Pull in what we can to simplify the immediate.
2112 ImmedOffset = ImmedOffset & Mask;
2113 if (isSub) {
2114 if (AddrMode == ARMII::AddrMode_i12)
2115 ImmedOffset = -ImmedOffset;
2116 else
2117 ImmedOffset |= 1 << NumBits;
2118 }
2119 ImmOp.ChangeToImmediate(ImmedOffset);
2120 Offset &= ~(Mask*Scale);
2121 }
2122 }
2123
2124 Offset = (isSub) ? -Offset : Offset;
2125 return Offset == 0;
2126}
2127
2128/// analyzeCompare - For a comparison instruction, return the source registers
2129/// in SrcReg and SrcReg2 if it has two register operands, and the value it
2130/// compares against in CmpValue. Return true if the comparison instruction
2131/// can be analyzed.
2132bool ARMBaseInstrInfo::
2133analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
2134 int &CmpMask, int &CmpValue) const {
2135 switch (MI->getOpcode()) {
2136 default: break;
2137 case ARM::CMPri:
2138 case ARM::t2CMPri:
2139 SrcReg = MI->getOperand(0).getReg();
2140 SrcReg2 = 0;
2141 CmpMask = ~0;
2142 CmpValue = MI->getOperand(1).getImm();
2143 return true;
2144 case ARM::CMPrr:
2145 case ARM::t2CMPrr:
2146 SrcReg = MI->getOperand(0).getReg();
2147 SrcReg2 = MI->getOperand(1).getReg();
2148 CmpMask = ~0;
2149 CmpValue = 0;
2150 return true;
2151 case ARM::TSTri:
2152 case ARM::t2TSTri:
2153 SrcReg = MI->getOperand(0).getReg();
2154 SrcReg2 = 0;
2155 CmpMask = MI->getOperand(1).getImm();
2156 CmpValue = 0;
2157 return true;
2158 }
2159
2160 return false;
2161}
2162
2163/// isSuitableForMask - Identify a suitable 'and' instruction that
2164/// operates on the given source register and applies the same mask
2165/// as a 'tst' instruction. Provide a limited look-through for copies.
2166/// When successful, MI will hold the found instruction.
2167static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
2168 int CmpMask, bool CommonUse) {
2169 switch (MI->getOpcode()) {
2170 case ARM::ANDri:
2171 case ARM::t2ANDri:
2172 if (CmpMask != MI->getOperand(2).getImm())
2173 return false;
2174 if (SrcReg == MI->getOperand(CommonUse ?
1 : 0).getReg()) 2175 return true; 2176 break; 2177 case ARM::COPY: { 2178 // Walk down one instruction which is potentially an 'and'. 2179 const MachineInstr &Copy = *MI; 2180 MachineBasicBlock::iterator AND( 2181 std::next(MachineBasicBlock::iterator(MI))); 2182 if (AND == MI->getParent()->end()) return false; 2183 MI = AND; 2184 return isSuitableForMask(MI, Copy.getOperand(0).getReg(), 2185 CmpMask, true); 2186 } 2187 } 2188 2189 return false; 2190} 2191 2192/// getSwappedCondition - assume the flags are set by MI(a,b), return 2193/// the condition code if we modify the instructions such that flags are 2194/// set by MI(b,a). 2195inline static ARMCC::CondCodes getSwappedCondition(ARMCC::CondCodes CC) { 2196 switch (CC) { 2197 default: return ARMCC::AL; 2198 case ARMCC::EQ: return ARMCC::EQ; 2199 case ARMCC::NE: return ARMCC::NE; 2200 case ARMCC::HS: return ARMCC::LS; 2201 case ARMCC::LO: return ARMCC::HI; 2202 case ARMCC::HI: return ARMCC::LO; 2203 case ARMCC::LS: return ARMCC::HS; 2204 case ARMCC::GE: return ARMCC::LE; 2205 case ARMCC::LT: return ARMCC::GT; 2206 case ARMCC::GT: return ARMCC::LT; 2207 case ARMCC::LE: return ARMCC::GE; 2208 } 2209} 2210 2211/// isRedundantFlagInstr - check whether the first instruction, whose only 2212/// purpose is to update flags, can be made redundant. 2213/// CMPrr can be made redundant by SUBrr if the operands are the same. 2214/// CMPri can be made redundant by SUBri if the operands are the same. 2215/// This function can be extended later on. 2216inline static bool isRedundantFlagInstr(MachineInstr *CmpI, unsigned SrcReg, 2217 unsigned SrcReg2, int ImmValue, 2218 MachineInstr *OI) { 2219 if ((CmpI->getOpcode() == ARM::CMPrr || 2220 CmpI->getOpcode() == ARM::t2CMPrr) && 2221 (OI->getOpcode() == ARM::SUBrr || 2222 OI->getOpcode() == ARM::t2SUBrr) && 2223 ((OI->getOperand(1).getReg() == SrcReg && 2224 OI->getOperand(2).getReg() == SrcReg2) || 2225 (OI->getOperand(1).getReg() == SrcReg2 && 2226 OI->getOperand(2).getReg() == SrcReg))) 2227 return true; 2228 2229 if ((CmpI->getOpcode() == ARM::CMPri || 2230 CmpI->getOpcode() == ARM::t2CMPri) && 2231 (OI->getOpcode() == ARM::SUBri || 2232 OI->getOpcode() == ARM::t2SUBri) && 2233 OI->getOperand(1).getReg() == SrcReg && 2234 OI->getOperand(2).getImm() == ImmValue) 2235 return true; 2236 return false; 2237} 2238 2239/// optimizeCompareInstr - Convert the instruction supplying the argument to the 2240/// comparison into one that sets the zero bit in the flags register; 2241/// Remove a redundant Compare instruction if an earlier instruction can set the 2242/// flags in the same way as Compare. 2243/// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two 2244/// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the 2245/// condition code of instructions which use the flags. 2246bool ARMBaseInstrInfo:: 2247optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, 2248 int CmpMask, int CmpValue, 2249 const MachineRegisterInfo *MRI) const { 2250 // Get the unique definition of SrcReg. 2251 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); 2252 if (!MI) return false; 2253 2254 // Masked compares sometimes use the same register as the corresponding 'and'. 
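 // e.g. for a TSTri of r0 with mask 255, the loop below looks for an ANDri
 // in the same block applying the same mask, which can later be switched to
 // its flag-setting form in place of the TST.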
2255 if (CmpMask != ~0) { 2256 if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(MI)) { 2257 MI = 0; 2258 for (MachineRegisterInfo::use_instr_iterator 2259 UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end(); 2260 UI != UE; ++UI) { 2261 if (UI->getParent() != CmpInstr->getParent()) continue; 2262 MachineInstr *PotentialAND = &*UI; 2263 if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) || 2264 isPredicated(PotentialAND)) 2265 continue; 2266 MI = PotentialAND; 2267 break; 2268 } 2269 if (!MI) return false; 2270 } 2271 } 2272 2273 // Get ready to iterate backward from CmpInstr. 2274 MachineBasicBlock::iterator I = CmpInstr, E = MI, 2275 B = CmpInstr->getParent()->begin(); 2276 2277 // Early exit if CmpInstr is at the beginning of the BB. 2278 if (I == B) return false; 2279 2280 // There are two possible candidates which can be changed to set CPSR: 2281 // One is MI, the other is a SUB instruction. 2282 // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1). 2283 // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue). 2284 MachineInstr *Sub = NULL; 2285 if (SrcReg2 != 0) 2286 // MI is not a candidate for CMPrr. 2287 MI = NULL; 2288 else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) { 2289 // Conservatively refuse to convert an instruction which isn't in the same 2290 // BB as the comparison. 2291 // For CMPri, we need to check Sub, thus we can't return here. 2292 if (CmpInstr->getOpcode() == ARM::CMPri || 2293 CmpInstr->getOpcode() == ARM::t2CMPri) 2294 MI = NULL; 2295 else 2296 return false; 2297 } 2298 2299 // Check that CPSR isn't set between the comparison instruction and the one we 2300 // want to change. At the same time, search for Sub. 2301 const TargetRegisterInfo *TRI = &getRegisterInfo(); 2302 --I; 2303 for (; I != E; --I) { 2304 const MachineInstr &Instr = *I; 2305 2306 if (Instr.modifiesRegister(ARM::CPSR, TRI) || 2307 Instr.readsRegister(ARM::CPSR, TRI)) 2308 // This instruction modifies or uses CPSR after the one we want to 2309 // change. We can't do this transformation. 2310 return false; 2311 2312 // Check whether CmpInstr can be made redundant by the current instruction. 2313 if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, &*I)) { 2314 Sub = &*I; 2315 break; 2316 } 2317 2318 if (I == B) 2319 // The 'and' is below the comparison instruction. 2320 return false; 2321 } 2322 2323 // Return false if no candidates exist. 2324 if (!MI && !Sub) 2325 return false; 2326 2327 // The single candidate is called MI. 2328 if (!MI) MI = Sub; 2329 2330 // We can't use a predicated instruction - it doesn't always write the flags. 
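 // e.g. a SUBSne would update CPSR only when the NE condition held, whereas
 // the CMP being removed writes the flags unconditionally.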
2331 if (isPredicated(MI))
2332 return false;
2333
2334 switch (MI->getOpcode()) {
2335 default: break;
2336 case ARM::RSBrr:
2337 case ARM::RSBri:
2338 case ARM::RSCrr:
2339 case ARM::RSCri:
2340 case ARM::ADDrr:
2341 case ARM::ADDri:
2342 case ARM::ADCrr:
2343 case ARM::ADCri:
2344 case ARM::SUBrr:
2345 case ARM::SUBri:
2346 case ARM::SBCrr:
2347 case ARM::SBCri:
2348 case ARM::t2RSBri:
2349 case ARM::t2ADDrr:
2350 case ARM::t2ADDri:
2351 case ARM::t2ADCrr:
2352 case ARM::t2ADCri:
2353 case ARM::t2SUBrr:
2354 case ARM::t2SUBri:
2355 case ARM::t2SBCrr:
2356 case ARM::t2SBCri:
2357 case ARM::ANDrr:
2358 case ARM::ANDri:
2359 case ARM::t2ANDrr:
2360 case ARM::t2ANDri:
2361 case ARM::ORRrr:
2362 case ARM::ORRri:
2363 case ARM::t2ORRrr:
2364 case ARM::t2ORRri:
2365 case ARM::EORrr:
2366 case ARM::EORri:
2367 case ARM::t2EORrr:
2368 case ARM::t2EORri: {
2369 // Scan forward for uses of CPSR.
2370 // When checking against MI: if the condition code requires checking of
2371 // the V bit, then this transformation is not safe to do.
2372 // It is safe to remove CmpInstr if CPSR is redefined or killed.
2373 // If we are done with the basic block, we need to check whether CPSR is
2374 // live-out.
2375 SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
2376 OperandsToUpdate;
2377 bool isSafe = false;
2378 I = CmpInstr;
2379 E = CmpInstr->getParent()->end();
2380 while (!isSafe && ++I != E) {
2381 const MachineInstr &Instr = *I;
2382 for (unsigned IO = 0, EO = Instr.getNumOperands();
2383 !isSafe && IO != EO; ++IO) {
2384 const MachineOperand &MO = Instr.getOperand(IO);
2385 if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
2386 isSafe = true;
2387 break;
2388 }
2389 if (!MO.isReg() || MO.getReg() != ARM::CPSR)
2390 continue;
2391 if (MO.isDef()) {
2392 isSafe = true;
2393 break;
2394 }
2395 // The condition code operand is the one just before CPSR, except for VSELs.
2396 ARMCC::CondCodes CC;
2397 bool IsInstrVSel = true;
2398 switch (Instr.getOpcode()) {
2399 default:
2400 IsInstrVSel = false;
2401 CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm();
2402 break;
2403 case ARM::VSELEQD:
2404 case ARM::VSELEQS:
2405 CC = ARMCC::EQ;
2406 break;
2407 case ARM::VSELGTD:
2408 case ARM::VSELGTS:
2409 CC = ARMCC::GT;
2410 break;
2411 case ARM::VSELGED:
2412 case ARM::VSELGES:
2413 CC = ARMCC::GE;
2414 break;
2415 case ARM::VSELVSS:
2416 case ARM::VSELVSD:
2417 CC = ARMCC::VS;
2418 break;
2419 }
2420
2421 if (Sub) {
2422 ARMCC::CondCodes NewCC = getSwappedCondition(CC);
2423 if (NewCC == ARMCC::AL)
2424 return false;
2425 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
2426 // on CMP needs to be updated to be based on SUB.
2427 // Push the condition code operands to OperandsToUpdate.
2428 // If it is safe to remove CmpInstr, the condition code of these
2429 // operands will be modified.
2430 if (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
2431 Sub->getOperand(2).getReg() == SrcReg) {
2432 // VSel doesn't support condition code update.
2433 if (IsInstrVSel)
2434 return false;
2435 OperandsToUpdate.push_back(
2436 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
2437 }
2438 } else
2439 switch (CC) {
2440 default:
2441 // CPSR can be used multiple times; we should continue.
2442 break;
2443 case ARMCC::VS:
2444 case ARMCC::VC:
2445 case ARMCC::GE:
2446 case ARMCC::LT:
2447 case ARMCC::GT:
2448 case ARMCC::LE:
2449 return false;
2450 }
2451 }
2452 }
2453
2454 // If CPSR is neither killed nor re-defined, we should check whether it is
2455 // live-out.
If it is live-out, do not optimize. 2456 if (!isSafe) { 2457 MachineBasicBlock *MBB = CmpInstr->getParent(); 2458 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(), 2459 SE = MBB->succ_end(); SI != SE; ++SI) 2460 if ((*SI)->isLiveIn(ARM::CPSR)) 2461 return false; 2462 } 2463 2464 // Toggle the optional operand to CPSR. 2465 MI->getOperand(5).setReg(ARM::CPSR); 2466 MI->getOperand(5).setIsDef(true); 2467 assert(!isPredicated(MI) && "Can't use flags from predicated instruction"); 2468 CmpInstr->eraseFromParent(); 2469 2470 // Modify the condition code of operands in OperandsToUpdate. 2471 // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to 2472 // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. 2473 for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++) 2474 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second); 2475 return true; 2476 } 2477 } 2478 2479 return false; 2480} 2481 2482bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI, 2483 MachineInstr *DefMI, unsigned Reg, 2484 MachineRegisterInfo *MRI) const { 2485 // Fold large immediates into add, sub, or, xor. 2486 unsigned DefOpc = DefMI->getOpcode(); 2487 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm) 2488 return false; 2489 if (!DefMI->getOperand(1).isImm()) 2490 // Could be t2MOVi32imm <ga:xx> 2491 return false; 2492 2493 if (!MRI->hasOneNonDBGUse(Reg)) 2494 return false; 2495 2496 const MCInstrDesc &DefMCID = DefMI->getDesc(); 2497 if (DefMCID.hasOptionalDef()) { 2498 unsigned NumOps = DefMCID.getNumOperands(); 2499 const MachineOperand &MO = DefMI->getOperand(NumOps-1); 2500 if (MO.getReg() == ARM::CPSR && !MO.isDead()) 2501 // If DefMI defines CPSR and it is not dead, it's obviously not safe 2502 // to delete DefMI. 2503 return false; 2504 } 2505 2506 const MCInstrDesc &UseMCID = UseMI->getDesc(); 2507 if (UseMCID.hasOptionalDef()) { 2508 unsigned NumOps = UseMCID.getNumOperands(); 2509 if (UseMI->getOperand(NumOps-1).getReg() == ARM::CPSR) 2510 // If the instruction sets the flag, do not attempt this optimization 2511 // since it may change the semantics of the code. 
2512 return false; 2513 } 2514 2515 unsigned UseOpc = UseMI->getOpcode(); 2516 unsigned NewUseOpc = 0; 2517 uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm(); 2518 uint32_t SOImmValV1 = 0, SOImmValV2 = 0; 2519 bool Commute = false; 2520 switch (UseOpc) { 2521 default: return false; 2522 case ARM::SUBrr: 2523 case ARM::ADDrr: 2524 case ARM::ORRrr: 2525 case ARM::EORrr: 2526 case ARM::t2SUBrr: 2527 case ARM::t2ADDrr: 2528 case ARM::t2ORRrr: 2529 case ARM::t2EORrr: { 2530 Commute = UseMI->getOperand(2).getReg() != Reg; 2531 switch (UseOpc) { 2532 default: break; 2533 case ARM::SUBrr: { 2534 if (Commute) 2535 return false; 2536 ImmVal = -ImmVal; 2537 NewUseOpc = ARM::SUBri; 2538 // Fallthrough 2539 } 2540 case ARM::ADDrr: 2541 case ARM::ORRrr: 2542 case ARM::EORrr: { 2543 if (!ARM_AM::isSOImmTwoPartVal(ImmVal)) 2544 return false; 2545 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal); 2546 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal); 2547 switch (UseOpc) { 2548 default: break; 2549 case ARM::ADDrr: NewUseOpc = ARM::ADDri; break; 2550 case ARM::ORRrr: NewUseOpc = ARM::ORRri; break; 2551 case ARM::EORrr: NewUseOpc = ARM::EORri; break; 2552 } 2553 break; 2554 } 2555 case ARM::t2SUBrr: { 2556 if (Commute) 2557 return false; 2558 ImmVal = -ImmVal; 2559 NewUseOpc = ARM::t2SUBri; 2560 // Fallthrough 2561 } 2562 case ARM::t2ADDrr: 2563 case ARM::t2ORRrr: 2564 case ARM::t2EORrr: { 2565 if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal)) 2566 return false; 2567 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal); 2568 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal); 2569 switch (UseOpc) { 2570 default: break; 2571 case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break; 2572 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break; 2573 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break; 2574 } 2575 break; 2576 } 2577 } 2578 } 2579 } 2580 2581 unsigned OpIdx = Commute ? 
2 : 1; 2582 unsigned Reg1 = UseMI->getOperand(OpIdx).getReg(); 2583 bool isKill = UseMI->getOperand(OpIdx).isKill(); 2584 unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg)); 2585 AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(), 2586 UseMI, UseMI->getDebugLoc(), 2587 get(NewUseOpc), NewReg) 2588 .addReg(Reg1, getKillRegState(isKill)) 2589 .addImm(SOImmValV1))); 2590 UseMI->setDesc(get(NewUseOpc)); 2591 UseMI->getOperand(1).setReg(NewReg); 2592 UseMI->getOperand(1).setIsKill(); 2593 UseMI->getOperand(2).ChangeToImmediate(SOImmValV2); 2594 DefMI->eraseFromParent(); 2595 return true; 2596} 2597 2598static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, 2599 const MachineInstr *MI) { 2600 switch (MI->getOpcode()) { 2601 default: { 2602 const MCInstrDesc &Desc = MI->getDesc(); 2603 int UOps = ItinData->getNumMicroOps(Desc.getSchedClass()); 2604 assert(UOps >= 0 && "bad # UOps"); 2605 return UOps; 2606 } 2607 2608 case ARM::LDRrs: 2609 case ARM::LDRBrs: 2610 case ARM::STRrs: 2611 case ARM::STRBrs: { 2612 unsigned ShOpVal = MI->getOperand(3).getImm(); 2613 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 2614 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 2615 if (!isSub && 2616 (ShImm == 0 || 2617 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 2618 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 2619 return 1; 2620 return 2; 2621 } 2622 2623 case ARM::LDRH: 2624 case ARM::STRH: { 2625 if (!MI->getOperand(2).getReg()) 2626 return 1; 2627 2628 unsigned ShOpVal = MI->getOperand(3).getImm(); 2629 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 2630 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 2631 if (!isSub && 2632 (ShImm == 0 || 2633 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 2634 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 2635 return 1; 2636 return 2; 2637 } 2638 2639 case ARM::LDRSB: 2640 case ARM::LDRSH: 2641 return (ARM_AM::getAM3Op(MI->getOperand(3).getImm()) == ARM_AM::sub) ? 3:2; 2642 2643 case ARM::LDRSB_POST: 2644 case ARM::LDRSH_POST: { 2645 unsigned Rt = MI->getOperand(0).getReg(); 2646 unsigned Rm = MI->getOperand(3).getReg(); 2647 return (Rt == Rm) ? 4 : 3; 2648 } 2649 2650 case ARM::LDR_PRE_REG: 2651 case ARM::LDRB_PRE_REG: { 2652 unsigned Rt = MI->getOperand(0).getReg(); 2653 unsigned Rm = MI->getOperand(3).getReg(); 2654 if (Rt == Rm) 2655 return 3; 2656 unsigned ShOpVal = MI->getOperand(4).getImm(); 2657 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 2658 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 2659 if (!isSub && 2660 (ShImm == 0 || 2661 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 2662 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 2663 return 2; 2664 return 3; 2665 } 2666 2667 case ARM::STR_PRE_REG: 2668 case ARM::STRB_PRE_REG: { 2669 unsigned ShOpVal = MI->getOperand(4).getImm(); 2670 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 2671 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 2672 if (!isSub && 2673 (ShImm == 0 || 2674 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 2675 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 2676 return 2; 2677 return 3; 2678 } 2679 2680 case ARM::LDRH_PRE: 2681 case ARM::STRH_PRE: { 2682 unsigned Rt = MI->getOperand(0).getReg(); 2683 unsigned Rm = MI->getOperand(3).getReg(); 2684 if (!Rm) 2685 return 2; 2686 if (Rt == Rm) 2687 return 3; 2688 return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) 2689 ? 
3 : 2; 2690 } 2691 2692 case ARM::LDR_POST_REG: 2693 case ARM::LDRB_POST_REG: 2694 case ARM::LDRH_POST: { 2695 unsigned Rt = MI->getOperand(0).getReg(); 2696 unsigned Rm = MI->getOperand(3).getReg(); 2697 return (Rt == Rm) ? 3 : 2; 2698 } 2699 2700 case ARM::LDR_PRE_IMM: 2701 case ARM::LDRB_PRE_IMM: 2702 case ARM::LDR_POST_IMM: 2703 case ARM::LDRB_POST_IMM: 2704 case ARM::STRB_POST_IMM: 2705 case ARM::STRB_POST_REG: 2706 case ARM::STRB_PRE_IMM: 2707 case ARM::STRH_POST: 2708 case ARM::STR_POST_IMM: 2709 case ARM::STR_POST_REG: 2710 case ARM::STR_PRE_IMM: 2711 return 2; 2712 2713 case ARM::LDRSB_PRE: 2714 case ARM::LDRSH_PRE: { 2715 unsigned Rm = MI->getOperand(3).getReg(); 2716 if (Rm == 0) 2717 return 3; 2718 unsigned Rt = MI->getOperand(0).getReg(); 2719 if (Rt == Rm) 2720 return 4; 2721 unsigned ShOpVal = MI->getOperand(4).getImm(); 2722 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 2723 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 2724 if (!isSub && 2725 (ShImm == 0 || 2726 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 2727 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 2728 return 3; 2729 return 4; 2730 } 2731 2732 case ARM::LDRD: { 2733 unsigned Rt = MI->getOperand(0).getReg(); 2734 unsigned Rn = MI->getOperand(2).getReg(); 2735 unsigned Rm = MI->getOperand(3).getReg(); 2736 if (Rm) 2737 return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3; 2738 return (Rt == Rn) ? 3 : 2; 2739 } 2740 2741 case ARM::STRD: { 2742 unsigned Rm = MI->getOperand(3).getReg(); 2743 if (Rm) 2744 return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3; 2745 return 2; 2746 } 2747 2748 case ARM::LDRD_POST: 2749 case ARM::t2LDRD_POST: 2750 return 3; 2751 2752 case ARM::STRD_POST: 2753 case ARM::t2STRD_POST: 2754 return 4; 2755 2756 case ARM::LDRD_PRE: { 2757 unsigned Rt = MI->getOperand(0).getReg(); 2758 unsigned Rn = MI->getOperand(3).getReg(); 2759 unsigned Rm = MI->getOperand(4).getReg(); 2760 if (Rm) 2761 return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4; 2762 return (Rt == Rn) ? 4 : 3; 2763 } 2764 2765 case ARM::t2LDRD_PRE: { 2766 unsigned Rt = MI->getOperand(0).getReg(); 2767 unsigned Rn = MI->getOperand(3).getReg(); 2768 return (Rt == Rn) ? 4 : 3; 2769 } 2770 2771 case ARM::STRD_PRE: { 2772 unsigned Rm = MI->getOperand(4).getReg(); 2773 if (Rm) 2774 return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4; 2775 return 3; 2776 } 2777 2778 case ARM::t2STRD_PRE: 2779 return 3; 2780 2781 case ARM::t2LDR_POST: 2782 case ARM::t2LDRB_POST: 2783 case ARM::t2LDRB_PRE: 2784 case ARM::t2LDRSBi12: 2785 case ARM::t2LDRSBi8: 2786 case ARM::t2LDRSBpci: 2787 case ARM::t2LDRSBs: 2788 case ARM::t2LDRH_POST: 2789 case ARM::t2LDRH_PRE: 2790 case ARM::t2LDRSBT: 2791 case ARM::t2LDRSB_POST: 2792 case ARM::t2LDRSB_PRE: 2793 case ARM::t2LDRSH_POST: 2794 case ARM::t2LDRSH_PRE: 2795 case ARM::t2LDRSHi12: 2796 case ARM::t2LDRSHi8: 2797 case ARM::t2LDRSHpci: 2798 case ARM::t2LDRSHs: 2799 return 2; 2800 2801 case ARM::t2LDRDi8: { 2802 unsigned Rt = MI->getOperand(0).getReg(); 2803 unsigned Rn = MI->getOperand(2).getReg(); 2804 return (Rt == Rn) ? 3 : 2; 2805 } 2806 2807 case ARM::t2STRB_POST: 2808 case ARM::t2STRB_PRE: 2809 case ARM::t2STRBs: 2810 case ARM::t2STRDi8: 2811 case ARM::t2STRH_POST: 2812 case ARM::t2STRH_PRE: 2813 case ARM::t2STRHs: 2814 case ARM::t2STR_POST: 2815 case ARM::t2STR_PRE: 2816 case ARM::t2STRs: 2817 return 2; 2818 } 2819} 2820 2821// Return the number of 32-bit words loaded by LDM or stored by STM. 
If this
2822 // can't be easily determined, return 0 (missing MachineMemOperand).
2823 //
2824 // FIXME: The current MachineInstr design does not support relying on machine
2825 // mem operands to determine the width of a memory access. Instead, we expect
2826 // the target to provide this information based on the instruction opcode and
2827 // operands. However, using MachineMemOperand is the best solution for now,
2828 // for two reasons:
2829 //
2830 // 1) getNumMicroOps tries to infer LDM memory width from the total number of
2831 // MI operands. This is much more dangerous than using the MachineMemOperand
2832 // sizes because CodeGen passes can insert/remove optional machine operands. In
2833 // fact, it's totally incorrect for preRA passes and appears to be wrong for
2834 // postRA passes as well.
2835 //
2836 // 2) getNumLDMAddresses is only used by the scheduling machine model and any
2837 // machine model that calls this should handle the unknown (zero size) case.
2838 //
2839 // Long term, we should require a target hook that verifies MachineMemOperand
2840 // sizes during MC lowering. That target hook should be local to MC lowering
2841 // because we can't ensure that it is aware of other MI forms. Doing this will
2842 // ensure that MachineMemOperands are correctly propagated through all passes.
2843unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr *MI) const {
2844 unsigned Size = 0;
2845 for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
2846 E = MI->memoperands_end(); I != E; ++I) {
2847 Size += (*I)->getSize();
2848 }
2849 return Size / 4;
2850}
2851
2852unsigned
2853ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
2854 const MachineInstr *MI) const {
2855 if (!ItinData || ItinData->isEmpty())
2856 return 1;
2857
2858 const MCInstrDesc &Desc = MI->getDesc();
2859 unsigned Class = Desc.getSchedClass();
2860 int ItinUOps = ItinData->getNumMicroOps(Class);
2861 if (ItinUOps >= 0) {
2862 if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
2863 return getNumMicroOpsSwiftLdSt(ItinData, MI);
2864
2865 return ItinUOps;
2866 }
2867
2868 unsigned Opc = MI->getOpcode();
2869 switch (Opc) {
2870 default:
2871 llvm_unreachable("Unexpected multi-uops instruction!");
2872 case ARM::VLDMQIA:
2873 case ARM::VSTMQIA:
2874 return 2;
2875
2876 // The number of uOps for load / store multiple is determined by the number
2877 // of registers.
2878 //
2879 // On Cortex-A8, each pair of register loads / stores can be scheduled on the
2880 // same cycle. The scheduling for the first load / store must be done
2881 // separately by assuming the address is not 64-bit aligned.
2882 //
2883 // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
2884 // is not 64-bit aligned, then the AGU takes an extra cycle. For VFP / NEON
2885 // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
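 // e.g. a VLDMDIA loading five D registers is charged 5/2 + 5%2 + 1 = 4
 // microops by the VFP / NEON formula below.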
2886 case ARM::VLDMDIA:
2887 case ARM::VLDMDIA_UPD:
2888 case ARM::VLDMDDB_UPD:
2889 case ARM::VLDMSIA:
2890 case ARM::VLDMSIA_UPD:
2891 case ARM::VLDMSDB_UPD:
2892 case ARM::VSTMDIA:
2893 case ARM::VSTMDIA_UPD:
2894 case ARM::VSTMDDB_UPD:
2895 case ARM::VSTMSIA:
2896 case ARM::VSTMSIA_UPD:
2897 case ARM::VSTMSDB_UPD: {
2898 unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
2899 return (NumRegs / 2) + (NumRegs % 2) + 1;
2900 }
2901
2902 case ARM::LDMIA_RET:
2903 case ARM::LDMIA:
2904 case ARM::LDMDA:
2905 case ARM::LDMDB:
2906 case ARM::LDMIB:
2907 case ARM::LDMIA_UPD:
2908 case ARM::LDMDA_UPD:
2909 case ARM::LDMDB_UPD:
2910 case ARM::LDMIB_UPD:
2911 case ARM::STMIA:
2912 case ARM::STMDA:
2913 case ARM::STMDB:
2914 case ARM::STMIB:
2915 case ARM::STMIA_UPD:
2916 case ARM::STMDA_UPD:
2917 case ARM::STMDB_UPD:
2918 case ARM::STMIB_UPD:
2919 case ARM::tLDMIA:
2920 case ARM::tLDMIA_UPD:
2921 case ARM::tSTMIA_UPD:
2922 case ARM::tPOP_RET:
2923 case ARM::tPOP:
2924 case ARM::tPUSH:
2925 case ARM::t2LDMIA_RET:
2926 case ARM::t2LDMIA:
2927 case ARM::t2LDMDB:
2928 case ARM::t2LDMIA_UPD:
2929 case ARM::t2LDMDB_UPD:
2930 case ARM::t2STMIA:
2931 case ARM::t2STMDB:
2932 case ARM::t2STMIA_UPD:
2933 case ARM::t2STMDB_UPD: {
2934 unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
2935 if (Subtarget.isSwift()) {
2936 int UOps = 1 + NumRegs; // One for address computation, one for each ld / st.
2937 switch (Opc) {
2938 default: break;
2939 case ARM::VLDMDIA_UPD:
2940 case ARM::VLDMDDB_UPD:
2941 case ARM::VLDMSIA_UPD:
2942 case ARM::VLDMSDB_UPD:
2943 case ARM::VSTMDIA_UPD:
2944 case ARM::VSTMDDB_UPD:
2945 case ARM::VSTMSIA_UPD:
2946 case ARM::VSTMSDB_UPD:
2947 case ARM::LDMIA_UPD:
2948 case ARM::LDMDA_UPD:
2949 case ARM::LDMDB_UPD:
2950 case ARM::LDMIB_UPD:
2951 case ARM::STMIA_UPD:
2952 case ARM::STMDA_UPD:
2953 case ARM::STMDB_UPD:
2954 case ARM::STMIB_UPD:
2955 case ARM::tLDMIA_UPD:
2956 case ARM::tSTMIA_UPD:
2957 case ARM::t2LDMIA_UPD:
2958 case ARM::t2LDMDB_UPD:
2959 case ARM::t2STMIA_UPD:
2960 case ARM::t2STMDB_UPD:
2961 ++UOps; // One for base register writeback.
2962 break;
2963 case ARM::LDMIA_RET:
2964 case ARM::tPOP_RET:
2965 case ARM::t2LDMIA_RET:
2966 UOps += 2; // One for base reg wb, one for write to pc.
2967 break;
2968 }
2969 return UOps;
2970 } else if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
2971 if (NumRegs < 4)
2972 return 2;
2973 // 4 registers would be issued: 2, 2.
2974 // 5 registers would be issued: 2, 2, 1.
2975 int A8UOps = (NumRegs / 2);
2976 if (NumRegs % 2)
2977 ++A8UOps;
2978 return A8UOps;
2979 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
2980 int A9UOps = (NumRegs / 2);
2981 // If there is an odd number of registers or if it's not 64-bit aligned,
2982 // then it takes an extra AGU (Address Generation Unit) cycle.
2983 if ((NumRegs % 2) ||
2984 !MI->hasOneMemOperand() ||
2985 (*MI->memoperands_begin())->getAlignment() < 8)
2986 ++A9UOps;
2987 return A9UOps;
2988 } else {
2989 // Assume the worst.
2990 return NumRegs;
2991 }
2992 }
2993 }
2994}
2995
2996int
2997ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
2998 const MCInstrDesc &DefMCID,
2999 unsigned DefClass,
3000 unsigned DefIdx, unsigned DefAlign) const {
3001 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3002 if (RegNo <= 0)
3003 // Def is the address writeback.
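 // (A positive RegNo instead identifies a register within the load list; the
 // per-core heuristics below estimate when it becomes available, e.g. on
 // Cortex-A8 the fifth register is defined around cycle 5/2 + 5%2 + 1 = 4.)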
3004 return ItinData->getOperandCycle(DefClass, DefIdx);
3005
3006 int DefCycle;
3007 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3008 // (regno / 2) + (regno % 2) + 1
3009 DefCycle = RegNo / 2 + 1;
3010 if (RegNo % 2)
3011 ++DefCycle;
3012 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3013 DefCycle = RegNo;
3014 bool isSLoad = false;
3015
3016 switch (DefMCID.getOpcode()) {
3017 default: break;
3018 case ARM::VLDMSIA:
3019 case ARM::VLDMSIA_UPD:
3020 case ARM::VLDMSDB_UPD:
3021 isSLoad = true;
3022 break;
3023 }
3024
3025 // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
3026 // then it takes an extra cycle.
3027 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3028 ++DefCycle;
3029 } else {
3030 // Assume the worst.
3031 DefCycle = RegNo + 2;
3032 }
3033
3034 return DefCycle;
3035}
3036
3037int
3038ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
3039 const MCInstrDesc &DefMCID,
3040 unsigned DefClass,
3041 unsigned DefIdx, unsigned DefAlign) const {
3042 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3043 if (RegNo <= 0)
3044 // Def is the address writeback.
3045 return ItinData->getOperandCycle(DefClass, DefIdx);
3046
3047 int DefCycle;
3048 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3049 // 4 registers would be issued: 1, 2, 1.
3050 // 5 registers would be issued: 1, 2, 2.
3051 DefCycle = RegNo / 2;
3052 if (DefCycle < 1)
3053 DefCycle = 1;
3054 // Result latency is issue cycle + 2: E2.
3055 DefCycle += 2;
3056 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3057 DefCycle = (RegNo / 2);
3058 // If there is an odd number of registers or if it's not 64-bit aligned,
3059 // then it takes an extra AGU (Address Generation Unit) cycle.
3060 if ((RegNo % 2) || DefAlign < 8)
3061 ++DefCycle;
3062 // Result latency is AGU cycles + 2.
3063 DefCycle += 2;
3064 } else {
3065 // Assume the worst.
3066 DefCycle = RegNo + 2;
3067 }
3068
3069 return DefCycle;
3070}
3071
3072int
3073ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
3074 const MCInstrDesc &UseMCID,
3075 unsigned UseClass,
3076 unsigned UseIdx, unsigned UseAlign) const {
3077 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3078 if (RegNo <= 0)
3079 return ItinData->getOperandCycle(UseClass, UseIdx);
3080
3081 int UseCycle;
3082 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3083 // (regno / 2) + (regno % 2) + 1
3084 UseCycle = RegNo / 2 + 1;
3085 if (RegNo % 2)
3086 ++UseCycle;
3087 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3088 UseCycle = RegNo;
3089 bool isSStore = false;
3090
3091 switch (UseMCID.getOpcode()) {
3092 default: break;
3093 case ARM::VSTMSIA:
3094 case ARM::VSTMSIA_UPD:
3095 case ARM::VSTMSDB_UPD:
3096 isSStore = true;
3097 break;
3098 }
3099
3100 // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
3101 // then it takes an extra cycle.
3102 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3103 ++UseCycle;
3104 } else {
3105 // Assume the worst.
3106 UseCycle = RegNo + 2;
3107 }
3108
3109 return UseCycle;
3110}
3111
3112int
3113ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
3114 const MCInstrDesc &UseMCID,
3115 unsigned UseClass,
3116 unsigned UseIdx, unsigned UseAlign) const {
3117 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3118 if (RegNo <= 0)
3119 return ItinData->getOperandCycle(UseClass, UseIdx);
3120
3121 int UseCycle;
3122 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3123 UseCycle = RegNo / 2;
3124 if (UseCycle < 2)
3125 UseCycle = 2;
3126 // Read in E3.
3127 UseCycle += 2;
3128 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3129 UseCycle = (RegNo / 2);
3130 // If there is an odd number of registers or if it's not 64-bit aligned,
3131 // then it takes an extra AGU (Address Generation Unit) cycle.
3132 if ((RegNo % 2) || UseAlign < 8)
3133 ++UseCycle;
3134 } else {
3135 // Assume the worst.
3136 UseCycle = 1;
3137 }
3138 return UseCycle;
3139}
3140
3141int
3142ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
3143 const MCInstrDesc &DefMCID,
3144 unsigned DefIdx, unsigned DefAlign,
3145 const MCInstrDesc &UseMCID,
3146 unsigned UseIdx, unsigned UseAlign) const {
3147 unsigned DefClass = DefMCID.getSchedClass();
3148 unsigned UseClass = UseMCID.getSchedClass();
3149
3150 if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
3151 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
3152
3153 // This may be a def / use of a variable_ops instruction; the operand
3154 // latency might be determinable dynamically. Let the target try to
3155 // figure it out.
3156 int DefCycle = -1;
3157 bool LdmBypass = false;
3158 switch (DefMCID.getOpcode()) {
3159 default:
3160 DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
3161 break;
3162
3163 case ARM::VLDMDIA:
3164 case ARM::VLDMDIA_UPD:
3165 case ARM::VLDMDDB_UPD:
3166 case ARM::VLDMSIA:
3167 case ARM::VLDMSIA_UPD:
3168 case ARM::VLDMSDB_UPD:
3169 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3170 break;
3171
3172 case ARM::LDMIA_RET:
3173 case ARM::LDMIA:
3174 case ARM::LDMDA:
3175 case ARM::LDMDB:
3176 case ARM::LDMIB:
3177 case ARM::LDMIA_UPD:
3178 case ARM::LDMDA_UPD:
3179 case ARM::LDMDB_UPD:
3180 case ARM::LDMIB_UPD:
3181 case ARM::tLDMIA:
3182 case ARM::tLDMIA_UPD:
3183 case ARM::tPUSH:
3184 case ARM::t2LDMIA_RET:
3185 case ARM::t2LDMIA:
3186 case ARM::t2LDMDB:
3187 case ARM::t2LDMIA_UPD:
3188 case ARM::t2LDMDB_UPD:
3189 LdmBypass = 1;
3190 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3191 break;
3192 }
3193
3194 if (DefCycle == -1)
3195 // We can't seem to determine the result latency of the def; assume it's 2.
3196 DefCycle = 2; 3197 3198 int UseCycle = -1; 3199 switch (UseMCID.getOpcode()) { 3200 default: 3201 UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); 3202 break; 3203 3204 case ARM::VSTMDIA: 3205 case ARM::VSTMDIA_UPD: 3206 case ARM::VSTMDDB_UPD: 3207 case ARM::VSTMSIA: 3208 case ARM::VSTMSIA_UPD: 3209 case ARM::VSTMSDB_UPD: 3210 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 3211 break; 3212 3213 case ARM::STMIA: 3214 case ARM::STMDA: 3215 case ARM::STMDB: 3216 case ARM::STMIB: 3217 case ARM::STMIA_UPD: 3218 case ARM::STMDA_UPD: 3219 case ARM::STMDB_UPD: 3220 case ARM::STMIB_UPD: 3221 case ARM::tSTMIA_UPD: 3222 case ARM::tPOP_RET: 3223 case ARM::tPOP: 3224 case ARM::t2STMIA: 3225 case ARM::t2STMDB: 3226 case ARM::t2STMIA_UPD: 3227 case ARM::t2STMDB_UPD: 3228 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 3229 break; 3230 } 3231 3232 if (UseCycle == -1) 3233 // Assume it's read in the first stage. 3234 UseCycle = 1; 3235 3236 UseCycle = DefCycle - UseCycle + 1; 3237 if (UseCycle > 0) { 3238 if (LdmBypass) { 3239 // It's a variable_ops instruction so we can't use DefIdx here. Just use 3240 // first def operand. 3241 if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1, 3242 UseClass, UseIdx)) 3243 --UseCycle; 3244 } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx, 3245 UseClass, UseIdx)) { 3246 --UseCycle; 3247 } 3248 } 3249 3250 return UseCycle; 3251} 3252 3253static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI, 3254 const MachineInstr *MI, unsigned Reg, 3255 unsigned &DefIdx, unsigned &Dist) { 3256 Dist = 0; 3257 3258 MachineBasicBlock::const_iterator I = MI; ++I; 3259 MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator()); 3260 assert(II->isInsideBundle() && "Empty bundle?"); 3261 3262 int Idx = -1; 3263 while (II->isInsideBundle()) { 3264 Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI); 3265 if (Idx != -1) 3266 break; 3267 --II; 3268 ++Dist; 3269 } 3270 3271 assert(Idx != -1 && "Cannot find bundled definition!"); 3272 DefIdx = Idx; 3273 return II; 3274} 3275 3276static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI, 3277 const MachineInstr *MI, unsigned Reg, 3278 unsigned &UseIdx, unsigned &Dist) { 3279 Dist = 0; 3280 3281 MachineBasicBlock::const_instr_iterator II = MI; ++II; 3282 assert(II->isInsideBundle() && "Empty bundle?"); 3283 MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end(); 3284 3285 // FIXME: This doesn't properly handle multiple uses. 3286 int Idx = -1; 3287 while (II != E && II->isInsideBundle()) { 3288 Idx = II->findRegisterUseOperandIdx(Reg, false, TRI); 3289 if (Idx != -1) 3290 break; 3291 if (II->getOpcode() != ARM::t2IT) 3292 ++Dist; 3293 ++II; 3294 } 3295 3296 if (Idx == -1) { 3297 Dist = 0; 3298 return 0; 3299 } 3300 3301 UseIdx = Idx; 3302 return II; 3303} 3304 3305/// Return the number of cycles to add to (or subtract from) the static 3306/// itinerary based on the def opcode and alignment. The caller will ensure that 3307/// adjusted latency is at least one cycle. 3308static int adjustDefLatency(const ARMSubtarget &Subtarget, 3309 const MachineInstr *DefMI, 3310 const MCInstrDesc *DefMCID, unsigned DefAlign) { 3311 int Adjust = 0; 3312 if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) { 3313 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 3314 // variants are one cycle cheaper. 
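 // e.g. an LDRrs of the form ldr r0, [r1, r2] or ldr r0, [r1, r2, lsl #2]
 // receives a one-cycle credit below, while other shifted forms keep the
 // full itinerary latency.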
3315 switch (DefMCID->getOpcode()) { 3316 default: break; 3317 case ARM::LDRrs: 3318 case ARM::LDRBrs: { 3319 unsigned ShOpVal = DefMI->getOperand(3).getImm(); 3320 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3321 if (ShImm == 0 || 3322 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 3323 --Adjust; 3324 break; 3325 } 3326 case ARM::t2LDRs: 3327 case ARM::t2LDRBs: 3328 case ARM::t2LDRHs: 3329 case ARM::t2LDRSHs: { 3330 // Thumb2 mode: lsl only. 3331 unsigned ShAmt = DefMI->getOperand(3).getImm(); 3332 if (ShAmt == 0 || ShAmt == 2) 3333 --Adjust; 3334 break; 3335 } 3336 } 3337 } else if (Subtarget.isSwift()) { 3338 // FIXME: Properly handle all of the latency adjustments for address 3339 // writeback. 3340 switch (DefMCID->getOpcode()) { 3341 default: break; 3342 case ARM::LDRrs: 3343 case ARM::LDRBrs: { 3344 unsigned ShOpVal = DefMI->getOperand(3).getImm(); 3345 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3346 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3347 if (!isSub && 3348 (ShImm == 0 || 3349 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3350 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3351 Adjust -= 2; 3352 else if (!isSub && 3353 ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 3354 --Adjust; 3355 break; 3356 } 3357 case ARM::t2LDRs: 3358 case ARM::t2LDRBs: 3359 case ARM::t2LDRHs: 3360 case ARM::t2LDRSHs: { 3361 // Thumb2 mode: lsl only. 3362 unsigned ShAmt = DefMI->getOperand(3).getImm(); 3363 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3) 3364 Adjust -= 2; 3365 break; 3366 } 3367 } 3368 } 3369 3370 if (DefAlign < 8 && Subtarget.isLikeA9()) { 3371 switch (DefMCID->getOpcode()) { 3372 default: break; 3373 case ARM::VLD1q8: 3374 case ARM::VLD1q16: 3375 case ARM::VLD1q32: 3376 case ARM::VLD1q64: 3377 case ARM::VLD1q8wb_fixed: 3378 case ARM::VLD1q16wb_fixed: 3379 case ARM::VLD1q32wb_fixed: 3380 case ARM::VLD1q64wb_fixed: 3381 case ARM::VLD1q8wb_register: 3382 case ARM::VLD1q16wb_register: 3383 case ARM::VLD1q32wb_register: 3384 case ARM::VLD1q64wb_register: 3385 case ARM::VLD2d8: 3386 case ARM::VLD2d16: 3387 case ARM::VLD2d32: 3388 case ARM::VLD2q8: 3389 case ARM::VLD2q16: 3390 case ARM::VLD2q32: 3391 case ARM::VLD2d8wb_fixed: 3392 case ARM::VLD2d16wb_fixed: 3393 case ARM::VLD2d32wb_fixed: 3394 case ARM::VLD2q8wb_fixed: 3395 case ARM::VLD2q16wb_fixed: 3396 case ARM::VLD2q32wb_fixed: 3397 case ARM::VLD2d8wb_register: 3398 case ARM::VLD2d16wb_register: 3399 case ARM::VLD2d32wb_register: 3400 case ARM::VLD2q8wb_register: 3401 case ARM::VLD2q16wb_register: 3402 case ARM::VLD2q32wb_register: 3403 case ARM::VLD3d8: 3404 case ARM::VLD3d16: 3405 case ARM::VLD3d32: 3406 case ARM::VLD1d64T: 3407 case ARM::VLD3d8_UPD: 3408 case ARM::VLD3d16_UPD: 3409 case ARM::VLD3d32_UPD: 3410 case ARM::VLD1d64Twb_fixed: 3411 case ARM::VLD1d64Twb_register: 3412 case ARM::VLD3q8_UPD: 3413 case ARM::VLD3q16_UPD: 3414 case ARM::VLD3q32_UPD: 3415 case ARM::VLD4d8: 3416 case ARM::VLD4d16: 3417 case ARM::VLD4d32: 3418 case ARM::VLD1d64Q: 3419 case ARM::VLD4d8_UPD: 3420 case ARM::VLD4d16_UPD: 3421 case ARM::VLD4d32_UPD: 3422 case ARM::VLD1d64Qwb_fixed: 3423 case ARM::VLD1d64Qwb_register: 3424 case ARM::VLD4q8_UPD: 3425 case ARM::VLD4q16_UPD: 3426 case ARM::VLD4q32_UPD: 3427 case ARM::VLD1DUPq8: 3428 case ARM::VLD1DUPq16: 3429 case ARM::VLD1DUPq32: 3430 case ARM::VLD1DUPq8wb_fixed: 3431 case ARM::VLD1DUPq16wb_fixed: 3432 case ARM::VLD1DUPq32wb_fixed: 3433 case ARM::VLD1DUPq8wb_register: 3434 case ARM::VLD1DUPq16wb_register: 3435 case 
ARM::VLD1DUPq32wb_register: 3436 case ARM::VLD2DUPd8: 3437 case ARM::VLD2DUPd16: 3438 case ARM::VLD2DUPd32: 3439 case ARM::VLD2DUPd8wb_fixed: 3440 case ARM::VLD2DUPd16wb_fixed: 3441 case ARM::VLD2DUPd32wb_fixed: 3442 case ARM::VLD2DUPd8wb_register: 3443 case ARM::VLD2DUPd16wb_register: 3444 case ARM::VLD2DUPd32wb_register: 3445 case ARM::VLD4DUPd8: 3446 case ARM::VLD4DUPd16: 3447 case ARM::VLD4DUPd32: 3448 case ARM::VLD4DUPd8_UPD: 3449 case ARM::VLD4DUPd16_UPD: 3450 case ARM::VLD4DUPd32_UPD: 3451 case ARM::VLD1LNd8: 3452 case ARM::VLD1LNd16: 3453 case ARM::VLD1LNd32: 3454 case ARM::VLD1LNd8_UPD: 3455 case ARM::VLD1LNd16_UPD: 3456 case ARM::VLD1LNd32_UPD: 3457 case ARM::VLD2LNd8: 3458 case ARM::VLD2LNd16: 3459 case ARM::VLD2LNd32: 3460 case ARM::VLD2LNq16: 3461 case ARM::VLD2LNq32: 3462 case ARM::VLD2LNd8_UPD: 3463 case ARM::VLD2LNd16_UPD: 3464 case ARM::VLD2LNd32_UPD: 3465 case ARM::VLD2LNq16_UPD: 3466 case ARM::VLD2LNq32_UPD: 3467 case ARM::VLD4LNd8: 3468 case ARM::VLD4LNd16: 3469 case ARM::VLD4LNd32: 3470 case ARM::VLD4LNq16: 3471 case ARM::VLD4LNq32: 3472 case ARM::VLD4LNd8_UPD: 3473 case ARM::VLD4LNd16_UPD: 3474 case ARM::VLD4LNd32_UPD: 3475 case ARM::VLD4LNq16_UPD: 3476 case ARM::VLD4LNq32_UPD: 3477 // If the address is not 64-bit aligned, the latencies of these 3478 // instructions increase by one. 3479 ++Adjust; 3480 break; 3481 } 3482 } 3483 return Adjust; 3484} 3485 3486 3487 3488int 3489ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 3490 const MachineInstr *DefMI, unsigned DefIdx, 3491 const MachineInstr *UseMI, 3492 unsigned UseIdx) const { 3493 // No operand latency. The caller may fall back to getInstrLatency. 3494 if (!ItinData || ItinData->isEmpty()) 3495 return -1; 3496 3497 const MachineOperand &DefMO = DefMI->getOperand(DefIdx); 3498 unsigned Reg = DefMO.getReg(); 3499 const MCInstrDesc *DefMCID = &DefMI->getDesc(); 3500 const MCInstrDesc *UseMCID = &UseMI->getDesc(); 3501 3502 unsigned DefAdj = 0; 3503 if (DefMI->isBundle()) { 3504 DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj); 3505 DefMCID = &DefMI->getDesc(); 3506 } 3507 if (DefMI->isCopyLike() || DefMI->isInsertSubreg() || 3508 DefMI->isRegSequence() || DefMI->isImplicitDef()) { 3509 return 1; 3510 } 3511 3512 unsigned UseAdj = 0; 3513 if (UseMI->isBundle()) { 3514 unsigned NewUseIdx; 3515 const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI, 3516 Reg, NewUseIdx, UseAdj); 3517 if (!NewUseMI) 3518 return -1; 3519 3520 UseMI = NewUseMI; 3521 UseIdx = NewUseIdx; 3522 UseMCID = &UseMI->getDesc(); 3523 } 3524 3525 if (Reg == ARM::CPSR) { 3526 if (DefMI->getOpcode() == ARM::FMSTAT) { 3527 // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?) 3528 return Subtarget.isLikeA9() ? 1 : 20; 3529 } 3530 3531 // CPSR set and branch can be paired in the same cycle. 3532 if (UseMI->isBranch()) 3533 return 0; 3534 3535 // Otherwise it takes the instruction latency (generally one). 3536 unsigned Latency = getInstrLatency(ItinData, DefMI); 3537 3538 // For Thumb2 and -Os, prefer scheduling a CPSR-setting instruction close to 3539 // its uses. Instructions which are otherwise scheduled between them may 3540 // incur a code size penalty (not able to use the CPSR-setting 16-bit 3541 // instructions). 3542 if (Latency > 0 && Subtarget.isThumb2()) { 3543 const MachineFunction *MF = DefMI->getParent()->getParent(); 3544 if (MF->getFunction()->getAttributes().
3545 hasAttribute(AttributeSet::FunctionIndex, 3546 Attribute::OptimizeForSize)) 3547 --Latency; 3548 } 3549 return Latency; 3550 } 3551 3552 if (DefMO.isImplicit() || UseMI->getOperand(UseIdx).isImplicit()) 3553 return -1; 3554 3555 unsigned DefAlign = DefMI->hasOneMemOperand() 3556 ? (*DefMI->memoperands_begin())->getAlignment() : 0; 3557 unsigned UseAlign = UseMI->hasOneMemOperand() 3558 ? (*UseMI->memoperands_begin())->getAlignment() : 0; 3559 3560 // Get the itinerary's latency if possible, and handle variable_ops. 3561 int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign, 3562 *UseMCID, UseIdx, UseAlign); 3563 // Unable to find operand latency. The caller may resort to getInstrLatency. 3564 if (Latency < 0) 3565 return Latency; 3566 3567 // Adjust for IT block position. 3568 int Adj = DefAdj + UseAdj; 3569 3570 // Adjust for dynamic def-side opcode variants not captured by the itinerary. 3571 Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign); 3572 if (Adj >= 0 || (int)Latency > -Adj) { 3573 return Latency + Adj; 3574 } 3575 // Return the itinerary latency, which may be zero but not less than zero. 3576 return Latency; 3577} 3578 3579int 3580ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 3581 SDNode *DefNode, unsigned DefIdx, 3582 SDNode *UseNode, unsigned UseIdx) const { 3583 if (!DefNode->isMachineOpcode()) 3584 return 1; 3585 3586 const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode()); 3587 3588 if (isZeroCost(DefMCID.Opcode)) 3589 return 0; 3590 3591 if (!ItinData || ItinData->isEmpty()) 3592 return DefMCID.mayLoad() ? 3 : 1; 3593 3594 if (!UseNode->isMachineOpcode()) { 3595 int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx); 3596 if (Subtarget.isLikeA9() || Subtarget.isSwift()) 3597 return Latency <= 2 ? 1 : Latency - 1; 3598 else 3599 return Latency <= 3 ? 1 : Latency - 2; 3600 } 3601 3602 const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode()); 3603 const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode); 3604 unsigned DefAlign = !DefMN->memoperands_empty() 3605 ? (*DefMN->memoperands_begin())->getAlignment() : 0; 3606 const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode); 3607 unsigned UseAlign = !UseMN->memoperands_empty() 3608 ? (*UseMN->memoperands_begin())->getAlignment() : 0; 3609 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, 3610 UseMCID, UseIdx, UseAlign); 3611 3612 if (Latency > 1 && 3613 (Subtarget.isCortexA8() || Subtarget.isLikeA9() || 3614 Subtarget.isCortexA7())) { 3615 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 3616 // variants are one cycle cheaper. 3617 switch (DefMCID.getOpcode()) { 3618 default: break; 3619 case ARM::LDRrs: 3620 case ARM::LDRBrs: { 3621 unsigned ShOpVal = 3622 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 3623 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3624 if (ShImm == 0 || 3625 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 3626 --Latency; 3627 break; 3628 } 3629 case ARM::t2LDRs: 3630 case ARM::t2LDRBs: 3631 case ARM::t2LDRHs: 3632 case ARM::t2LDRSHs: { 3633 // Thumb2 mode: lsl only. 3634 unsigned ShAmt = 3635 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 3636 if (ShAmt == 0 || ShAmt == 2) 3637 --Latency; 3638 break; 3639 } 3640 } 3641 } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) { 3642 // FIXME: Properly handle all of the latency adjustments for address 3643 // writeback. 
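// [Editor's note, illustrative -- not part of this revision.] On Swift the
// cheap register-offset forms are wider than on A8/A9: the switch below takes
// two cycles off for no shift or lsl #1-3, and one cycle off for lsr #1, e.g.
//
//   ldr r0, [r1, r2]          ; Latency -= 2
//   ldr r0, [r1, r2, lsl #2]  ; Latency -= 2
//   ldr r0, [r1, r2, lsr #1]  ; Latency -= 1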
3644 switch (DefMCID.getOpcode()) { 3645 default: break; 3646 case ARM::LDRrs: 3647 case ARM::LDRBrs: { 3648 unsigned ShOpVal = 3649 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 3650 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3651 if (ShImm == 0 || 3652 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3653 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 3654 Latency -= 2; 3655 else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 3656 --Latency; 3657 break; 3658 } 3659 case ARM::t2LDRs: 3660 case ARM::t2LDRBs: 3661 case ARM::t2LDRHs: 3662 case ARM::t2LDRSHs: { 3663 // Thumb2 mode: lsl 0-3 only. 3664 Latency -= 2; 3665 break; 3666 } 3667 } 3668 } 3669 3670 if (DefAlign < 8 && Subtarget.isLikeA9()) 3671 switch (DefMCID.getOpcode()) { 3672 default: break; 3673 case ARM::VLD1q8: 3674 case ARM::VLD1q16: 3675 case ARM::VLD1q32: 3676 case ARM::VLD1q64: 3677 case ARM::VLD1q8wb_register: 3678 case ARM::VLD1q16wb_register: 3679 case ARM::VLD1q32wb_register: 3680 case ARM::VLD1q64wb_register: 3681 case ARM::VLD1q8wb_fixed: 3682 case ARM::VLD1q16wb_fixed: 3683 case ARM::VLD1q32wb_fixed: 3684 case ARM::VLD1q64wb_fixed: 3685 case ARM::VLD2d8: 3686 case ARM::VLD2d16: 3687 case ARM::VLD2d32: 3688 case ARM::VLD2q8Pseudo: 3689 case ARM::VLD2q16Pseudo: 3690 case ARM::VLD2q32Pseudo: 3691 case ARM::VLD2d8wb_fixed: 3692 case ARM::VLD2d16wb_fixed: 3693 case ARM::VLD2d32wb_fixed: 3694 case ARM::VLD2q8PseudoWB_fixed: 3695 case ARM::VLD2q16PseudoWB_fixed: 3696 case ARM::VLD2q32PseudoWB_fixed: 3697 case ARM::VLD2d8wb_register: 3698 case ARM::VLD2d16wb_register: 3699 case ARM::VLD2d32wb_register: 3700 case ARM::VLD2q8PseudoWB_register: 3701 case ARM::VLD2q16PseudoWB_register: 3702 case ARM::VLD2q32PseudoWB_register: 3703 case ARM::VLD3d8Pseudo: 3704 case ARM::VLD3d16Pseudo: 3705 case ARM::VLD3d32Pseudo: 3706 case ARM::VLD1d64TPseudo: 3707 case ARM::VLD1d64TPseudoWB_fixed: 3708 case ARM::VLD3d8Pseudo_UPD: 3709 case ARM::VLD3d16Pseudo_UPD: 3710 case ARM::VLD3d32Pseudo_UPD: 3711 case ARM::VLD3q8Pseudo_UPD: 3712 case ARM::VLD3q16Pseudo_UPD: 3713 case ARM::VLD3q32Pseudo_UPD: 3714 case ARM::VLD3q8oddPseudo: 3715 case ARM::VLD3q16oddPseudo: 3716 case ARM::VLD3q32oddPseudo: 3717 case ARM::VLD3q8oddPseudo_UPD: 3718 case ARM::VLD3q16oddPseudo_UPD: 3719 case ARM::VLD3q32oddPseudo_UPD: 3720 case ARM::VLD4d8Pseudo: 3721 case ARM::VLD4d16Pseudo: 3722 case ARM::VLD4d32Pseudo: 3723 case ARM::VLD1d64QPseudo: 3724 case ARM::VLD1d64QPseudoWB_fixed: 3725 case ARM::VLD4d8Pseudo_UPD: 3726 case ARM::VLD4d16Pseudo_UPD: 3727 case ARM::VLD4d32Pseudo_UPD: 3728 case ARM::VLD4q8Pseudo_UPD: 3729 case ARM::VLD4q16Pseudo_UPD: 3730 case ARM::VLD4q32Pseudo_UPD: 3731 case ARM::VLD4q8oddPseudo: 3732 case ARM::VLD4q16oddPseudo: 3733 case ARM::VLD4q32oddPseudo: 3734 case ARM::VLD4q8oddPseudo_UPD: 3735 case ARM::VLD4q16oddPseudo_UPD: 3736 case ARM::VLD4q32oddPseudo_UPD: 3737 case ARM::VLD1DUPq8: 3738 case ARM::VLD1DUPq16: 3739 case ARM::VLD1DUPq32: 3740 case ARM::VLD1DUPq8wb_fixed: 3741 case ARM::VLD1DUPq16wb_fixed: 3742 case ARM::VLD1DUPq32wb_fixed: 3743 case ARM::VLD1DUPq8wb_register: 3744 case ARM::VLD1DUPq16wb_register: 3745 case ARM::VLD1DUPq32wb_register: 3746 case ARM::VLD2DUPd8: 3747 case ARM::VLD2DUPd16: 3748 case ARM::VLD2DUPd32: 3749 case ARM::VLD2DUPd8wb_fixed: 3750 case ARM::VLD2DUPd16wb_fixed: 3751 case ARM::VLD2DUPd32wb_fixed: 3752 case ARM::VLD2DUPd8wb_register: 3753 case ARM::VLD2DUPd16wb_register: 3754 case ARM::VLD2DUPd32wb_register: 3755 case ARM::VLD4DUPd8Pseudo: 3756 case ARM::VLD4DUPd16Pseudo: 
3757 case ARM::VLD4DUPd32Pseudo: 3758 case ARM::VLD4DUPd8Pseudo_UPD: 3759 case ARM::VLD4DUPd16Pseudo_UPD: 3760 case ARM::VLD4DUPd32Pseudo_UPD: 3761 case ARM::VLD1LNq8Pseudo: 3762 case ARM::VLD1LNq16Pseudo: 3763 case ARM::VLD1LNq32Pseudo: 3764 case ARM::VLD1LNq8Pseudo_UPD: 3765 case ARM::VLD1LNq16Pseudo_UPD: 3766 case ARM::VLD1LNq32Pseudo_UPD: 3767 case ARM::VLD2LNd8Pseudo: 3768 case ARM::VLD2LNd16Pseudo: 3769 case ARM::VLD2LNd32Pseudo: 3770 case ARM::VLD2LNq16Pseudo: 3771 case ARM::VLD2LNq32Pseudo: 3772 case ARM::VLD2LNd8Pseudo_UPD: 3773 case ARM::VLD2LNd16Pseudo_UPD: 3774 case ARM::VLD2LNd32Pseudo_UPD: 3775 case ARM::VLD2LNq16Pseudo_UPD: 3776 case ARM::VLD2LNq32Pseudo_UPD: 3777 case ARM::VLD4LNd8Pseudo: 3778 case ARM::VLD4LNd16Pseudo: 3779 case ARM::VLD4LNd32Pseudo: 3780 case ARM::VLD4LNq16Pseudo: 3781 case ARM::VLD4LNq32Pseudo: 3782 case ARM::VLD4LNd8Pseudo_UPD: 3783 case ARM::VLD4LNd16Pseudo_UPD: 3784 case ARM::VLD4LNd32Pseudo_UPD: 3785 case ARM::VLD4LNq16Pseudo_UPD: 3786 case ARM::VLD4LNq32Pseudo_UPD: 3787 // If the address is not 64-bit aligned, the latencies of these 3788 // instructions increase by one. 3789 ++Latency; 3790 break; 3791 } 3792 3793 return Latency; 3794} 3795 3796unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr *MI) const { 3797 if (MI->isCopyLike() || MI->isInsertSubreg() || 3798 MI->isRegSequence() || MI->isImplicitDef()) 3799 return 0; 3800 3801 if (MI->isBundle()) 3802 return 0; 3803 3804 const MCInstrDesc &MCID = MI->getDesc(); 3805 3806 if (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR)) { 3807 // When predicated, CPSR is an additional source operand for CPSR-updating 3808 // instructions; this apparently increases their latencies. 3809 return 1; 3810 } 3811 return 0; 3812} 3813 3814unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 3815 const MachineInstr *MI, 3816 unsigned *PredCost) const { 3817 if (MI->isCopyLike() || MI->isInsertSubreg() || 3818 MI->isRegSequence() || MI->isImplicitDef()) 3819 return 1; 3820 3821 // An instruction scheduler typically runs on unbundled instructions; however, 3822 // other passes may query the latency of a bundled instruction. 3823 if (MI->isBundle()) { 3824 unsigned Latency = 0; 3825 MachineBasicBlock::const_instr_iterator I = MI; 3826 MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end(); 3827 while (++I != E && I->isInsideBundle()) { 3828 if (I->getOpcode() != ARM::t2IT) 3829 Latency += getInstrLatency(ItinData, I, PredCost); 3830 } 3831 return Latency; 3832 } 3833 3834 const MCInstrDesc &MCID = MI->getDesc(); 3835 if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR))) { 3836 // When predicated, CPSR is an additional source operand for CPSR-updating 3837 // instructions; this apparently increases their latencies. 3838 *PredCost = 1; 3839 } 3840 // Be sure to call getStageLatency for an empty itinerary in case it has a 3841 // valid MinLatency property. 3842 if (!ItinData) 3843 return MI->mayLoad() ? 3 : 1; 3844 3845 unsigned Class = MCID.getSchedClass(); 3846 3847 // For instructions with variable uops, use uops as latency. 3848 if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0) 3849 return getNumMicroOps(ItinData, MI); 3850 3851 // For the common case, fall back on the itinerary's latency. 3852 unsigned Latency = ItinData->getStageLatency(Class); 3853 3854 // Adjust for dynamic def-side opcode variants not captured by the itinerary. 3855 unsigned DefAlign = MI->hasOneMemOperand() 3856 ?
(*MI->memoperands_begin())->getAlignment() : 0; 3857 int Adj = adjustDefLatency(Subtarget, MI, &MCID, DefAlign); 3858 if (Adj >= 0 || (int)Latency > -Adj) { 3859 return Latency + Adj; 3860 } 3861 return Latency; 3862} 3863 3864int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 3865 SDNode *Node) const { 3866 if (!Node->isMachineOpcode()) 3867 return 1; 3868 3869 if (!ItinData || ItinData->isEmpty()) 3870 return 1; 3871 3872 unsigned Opcode = Node->getMachineOpcode(); 3873 switch (Opcode) { 3874 default: 3875 return ItinData->getStageLatency(get(Opcode).getSchedClass()); 3876 case ARM::VLDMQIA: 3877 case ARM::VSTMQIA: 3878 return 2; 3879 } 3880} 3881 3882bool ARMBaseInstrInfo:: 3883hasHighOperandLatency(const InstrItineraryData *ItinData, 3884 const MachineRegisterInfo *MRI, 3885 const MachineInstr *DefMI, unsigned DefIdx, 3886 const MachineInstr *UseMI, unsigned UseIdx) const { 3887 unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask; 3888 unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask; 3889 if (Subtarget.isCortexA8() && 3890 (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP)) 3891 // CortexA8 VFP instructions are not pipelined. 3892 return true; 3893 3894 // Hoist VFP / NEON instructions with 4 or higher latency. 3895 int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx); 3896 if (Latency < 0) 3897 Latency = getInstrLatency(ItinData, DefMI); 3898 if (Latency <= 3) 3899 return false; 3900 return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON || 3901 UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON; 3902} 3903 3904bool ARMBaseInstrInfo:: 3905hasLowDefLatency(const InstrItineraryData *ItinData, 3906 const MachineInstr *DefMI, unsigned DefIdx) const { 3907 if (!ItinData || ItinData->isEmpty()) 3908 return false; 3909 3910 unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask; 3911 if (DDomain == ARMII::DomainGeneral) { 3912 unsigned DefClass = DefMI->getDesc().getSchedClass(); 3913 int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); 3914 return (DefCycle != -1 && DefCycle <= 2); 3915 } 3916 return false; 3917} 3918 3919bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI, 3920 StringRef &ErrInfo) const { 3921 if (convertAddSubFlagsOpcode(MI->getOpcode())) { 3922 ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG"; 3923 return false; 3924 } 3925 return true; 3926} 3927 3928bool 3929ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc, 3930 unsigned &AddSubOpc, 3931 bool &NegAcc, bool &HasLane) const { 3932 DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode); 3933 if (I == MLxEntryMap.end()) 3934 return false; 3935 3936 const ARM_MLxEntry &Entry = ARM_MLxTable[I->second]; 3937 MulOpc = Entry.MulOpc; 3938 AddSubOpc = Entry.AddSubOpc; 3939 NegAcc = Entry.NegAcc; 3940 HasLane = Entry.HasLane; 3941 return true; 3942} 3943 3944//===----------------------------------------------------------------------===// 3945// Execution domains. 3946//===----------------------------------------------------------------------===// 3947// 3948// Some instructions go down the NEON pipeline, some go down the VFP pipeline, 3949// and some can go down both. The vmov instructions go down the VFP pipeline, 3950// but they can be changed to vorr equivalents that are executed by the NEON 3951// pipeline. 
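// [Editor's illustration -- not part of this revision.] The canonical example
// of such a domain swizzle is the D-register copy that setExecutionDomain
// below rewrites when the NEON domain is requested:
//
//   vmov.f64 d0, d1       ; VMOVD, executes in the VFP domain
//   vorr     d0, d1, d1   ; VORRd, the same copy in the NEON domain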
3952// 3953 // We use the following execution domain numbering: 3954 // 3955enum ARMExeDomain { 3956 ExeGeneric = 0, 3957 ExeVFP = 1, 3958 ExeNEON = 2 3959}; 3960// 3961// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h 3962// 3963std::pair<uint16_t, uint16_t> 3964ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const { 3965 // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON 3966 // if they are not predicated. 3967 if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI)) 3968 return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON)); 3969 3970 // CortexA9 is particularly picky about mixing the two and wants these 3971 // converted. 3972 if (Subtarget.isCortexA9() && !isPredicated(MI) && 3973 (MI->getOpcode() == ARM::VMOVRS || 3974 MI->getOpcode() == ARM::VMOVSR || 3975 MI->getOpcode() == ARM::VMOVS)) 3976 return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON)); 3977 3978 // No other instructions can be swizzled, so just determine their domain. 3979 unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask; 3980 3981 if (Domain & ARMII::DomainNEON) 3982 return std::make_pair(ExeNEON, 0); 3983 3984 // Certain instructions can go either way on Cortex-A8. 3985 // Treat them as NEON instructions. 3986 if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8()) 3987 return std::make_pair(ExeNEON, 0); 3988 3989 if (Domain & ARMII::DomainVFP) 3990 return std::make_pair(ExeVFP, 0); 3991 3992 return std::make_pair(ExeGeneric, 0); 3993} 3994 3995static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, 3996 unsigned SReg, unsigned &Lane) { 3997 unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass); 3998 Lane = 0; 3999 4000 if (DReg != ARM::NoRegister) 4001 return DReg; 4002 4003 Lane = 1; 4004 DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass); 4005 4006 assert(DReg && "S-register with no D super-register?"); 4007 return DReg; 4008} 4009 4010/// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, 4011/// set ImplicitSReg to a register number that must be marked as implicit-use, or 4012/// to zero if no register needs to be marked as implicit-use. 4013/// 4014/// If the function cannot determine whether an SPR should be marked as an 4015/// implicit use or not, it returns false. 4016/// 4017/// This function handles cases where an instruction is being modified from taking 4018/// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict 4019/// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other 4020/// lane of the DPR). 4021/// 4022/// If the other SPR is defined, an implicit-use of it should be added. Otherwise 4023/// (including the case where the DPR itself is defined), it should not. 4024/// 4025static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, 4026 MachineInstr *MI, 4027 unsigned DReg, unsigned Lane, 4028 unsigned &ImplicitSReg) { 4029 // If the DPR is defined or used already, the other SPR lane will be chained 4030 // correctly, so there is nothing to be done. 4031 if (MI->definesRegister(DReg, TRI) || MI->readsRegister(DReg, TRI)) { 4032 ImplicitSReg = 0; 4033 return true; 4034 } 4035 4036 // Otherwise we need to go searching to see if the SPR is set explicitly. 4037 ImplicitSReg = TRI->getSubReg(DReg, 4038 (Lane & 1) ?
ARM::ssub_0 : ARM::ssub_1); 4039 MachineBasicBlock::LivenessQueryResult LQR = 4040 MI->getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI); 4041 4042 if (LQR == MachineBasicBlock::LQR_Live) 4043 return true; 4044 else if (LQR == MachineBasicBlock::LQR_Unknown) 4045 return false; 4046 4047 // If the register is known not to be live, there is no need to add an 4048 // implicit-use. 4049 ImplicitSReg = 0; 4050 return true; 4051} 4052 4053void 4054ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const { 4055 unsigned DstReg, SrcReg, DReg; 4056 unsigned Lane; 4057 MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI); 4058 const TargetRegisterInfo *TRI = &getRegisterInfo(); 4059 switch (MI->getOpcode()) { 4060 default: 4061 llvm_unreachable("cannot handle opcode!"); 4062 break; 4063 case ARM::VMOVD: 4064 if (Domain != ExeNEON) 4065 break; 4066 4067 // Zap the predicate operands. 4068 assert(!isPredicated(MI) && "Cannot predicate a VORRd"); 4069 4070 // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits) 4071 DstReg = MI->getOperand(0).getReg(); 4072 SrcReg = MI->getOperand(1).getReg(); 4073 4074 for (unsigned i = MI->getDesc().getNumOperands(); i; --i) 4075 MI->RemoveOperand(i-1); 4076 4077 // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits) 4078 MI->setDesc(get(ARM::VORRd)); 4079 AddDefaultPred(MIB.addReg(DstReg, RegState::Define) 4080 .addReg(SrcReg) 4081 .addReg(SrcReg)); 4082 break; 4083 case ARM::VMOVRS: 4084 if (Domain != ExeNEON) 4085 break; 4086 assert(!isPredicated(MI) && "Cannot predicate a VGETLN"); 4087 4088 // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits) 4089 DstReg = MI->getOperand(0).getReg(); 4090 SrcReg = MI->getOperand(1).getReg(); 4091 4092 for (unsigned i = MI->getDesc().getNumOperands(); i; --i) 4093 MI->RemoveOperand(i-1); 4094 4095 DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane); 4096 4097 // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps) 4098 // Note that DSrc has been widened and the other lane may be undef, which 4099 // contaminates the entire register. 4100 MI->setDesc(get(ARM::VGETLNi32)); 4101 AddDefaultPred(MIB.addReg(DstReg, RegState::Define) 4102 .addReg(DReg, RegState::Undef) 4103 .addImm(Lane)); 4104 4105 // The old source should be an implicit use; otherwise we might think it 4106 // was dead before here. 4107 MIB.addReg(SrcReg, RegState::Implicit); 4108 break; 4109 case ARM::VMOVSR: { 4110 if (Domain != ExeNEON) 4111 break; 4112 assert(!isPredicated(MI) && "Cannot predicate a VSETLN"); 4113 4114 // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits) 4115 DstReg = MI->getOperand(0).getReg(); 4116 SrcReg = MI->getOperand(1).getReg(); 4117 4118 DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane); 4119 4120 unsigned ImplicitSReg; 4121 if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg)) 4122 break; 4123 4124 for (unsigned i = MI->getDesc().getNumOperands(); i; --i) 4125 MI->RemoveOperand(i-1); 4126 4127 // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps) 4128 // Again DDst may be undefined at the beginning of this instruction. 4129 MI->setDesc(get(ARM::VSETLNi32)); 4130 MIB.addReg(DReg, RegState::Define) 4131 .addReg(DReg, getUndefRegState(!MI->readsRegister(DReg, TRI))) 4132 .addReg(SrcReg) 4133 .addImm(Lane); 4134 AddDefaultPred(MIB); 4135 4136 // The narrower destination must be marked as set to keep previous chains 4137 // in place.
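// [Editor's sketch of the resulting instruction, illustrative only; the
// register names are hypothetical.] After the rewrite, the former VMOVSR has
// the shape
//
//   %D0<def> = VSETLNi32 %D0<undef>, %R1, Lane, pred:14, pred:%noreg,
//              %S0<imp-def>, %S1<imp-use, if live>
//
// i.e. the original S-register destination survives as an implicit def, and
// the sibling lane found by getImplicitSPRUseForDPRUse as an implicit use.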
4138 MIB.addReg(DstReg, RegState::Define | RegState::Implicit); 4139 if (ImplicitSReg != 0) 4140 MIB.addReg(ImplicitSReg, RegState::Implicit); 4141 break; 4142 } 4143 case ARM::VMOVS: { 4144 if (Domain != ExeNEON) 4145 break; 4146 4147 // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits) 4148 DstReg = MI->getOperand(0).getReg(); 4149 SrcReg = MI->getOperand(1).getReg(); 4150 4151 unsigned DstLane = 0, SrcLane = 0, DDst, DSrc; 4152 DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane); 4153 DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane); 4154 4155 unsigned ImplicitSReg; 4156 if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg)) 4157 break; 4158 4159 for (unsigned i = MI->getDesc().getNumOperands(); i; --i) 4160 MI->RemoveOperand(i-1); 4161 4162 if (DSrc == DDst) { 4163 // Destination can be: 4164 // %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits) 4165 MI->setDesc(get(ARM::VDUPLN32d)); 4166 MIB.addReg(DDst, RegState::Define) 4167 .addReg(DDst, getUndefRegState(!MI->readsRegister(DDst, TRI))) 4168 .addImm(SrcLane); 4169 AddDefaultPred(MIB); 4170 4171 // Neither the source nor the destination is naturally represented any 4172 // more, so add them in manually. 4173 MIB.addReg(DstReg, RegState::Implicit | RegState::Define); 4174 MIB.addReg(SrcReg, RegState::Implicit); 4175 if (ImplicitSReg != 0) 4176 MIB.addReg(ImplicitSReg, RegState::Implicit); 4177 break; 4178 } 4179 4180 // In general there's no single instruction that can perform an S <-> S 4181 // move in NEON space, but a pair of VEXT instructions *can* do the 4182 // job. It turns out that the VEXTs needed will only use DSrc once, with 4183 // the position based purely on the combination of lane-0 and lane-1 4184 // involved. For example: 4185 // vmov s0, s2 -> vext.32 d0, d0, d1, #1 vext.32 d0, d0, d0, #1 4186 // vmov s1, s3 -> vext.32 d0, d1, d0, #1 vext.32 d0, d0, d0, #1 4187 // vmov s0, s3 -> vext.32 d0, d0, d0, #1 vext.32 d0, d1, d0, #1 4188 // vmov s1, s2 -> vext.32 d0, d0, d0, #1 vext.32 d0, d0, d1, #1 4189 // 4190 // The pattern of the MachineInstrs is: 4191 // %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (; implicits) 4192 MachineInstrBuilder NewMIB; 4193 NewMIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), 4194 get(ARM::VEXTd32), DDst); 4195 4196 // On the first instruction, both DSrc and DDst may be <undef> if present, 4197 // specifically when the original instruction didn't have them as an 4198 // <imp-use>. 4199 unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst; 4200 bool CurUndef = !MI->readsRegister(CurReg, TRI); 4201 NewMIB.addReg(CurReg, getUndefRegState(CurUndef)); 4202 4203 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst; 4204 CurUndef = !MI->readsRegister(CurReg, TRI); 4205 NewMIB.addReg(CurReg, getUndefRegState(CurUndef)); 4206 4207 NewMIB.addImm(1); 4208 AddDefaultPred(NewMIB); 4209 4210 if (SrcLane == DstLane) 4211 NewMIB.addReg(SrcReg, RegState::Implicit); 4212 4213 MI->setDesc(get(ARM::VEXTd32)); 4214 MIB.addReg(DDst, RegState::Define); 4215 4216 // On the second instruction, DDst has definitely been defined above, so 4217 // it is not <undef>. DSrc, if present, can be <undef> as above. 4218 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst; 4219 CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI); 4220 MIB.addReg(CurReg, getUndefRegState(CurUndef)); 4221 4222 CurReg = SrcLane == 0 && DstLane == 1 ?
DSrc : DDst; 4223 CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI); 4224 MIB.addReg(CurReg, getUndefRegState(CurUndef)); 4225 4226 MIB.addImm(1); 4227 AddDefaultPred(MIB); 4228 4229 if (SrcLane != DstLane) 4230 MIB.addReg(SrcReg, RegState::Implicit); 4231 4232 // As before, the original destination is no longer represented; add it 4233 // implicitly. 4234 MIB.addReg(DstReg, RegState::Define | RegState::Implicit); 4235 if (ImplicitSReg != 0) 4236 MIB.addReg(ImplicitSReg, RegState::Implicit); 4237 break; 4238 } 4239 } 4240 4241} 4242 4243//===----------------------------------------------------------------------===// 4244// Partial register updates 4245//===----------------------------------------------------------------------===// 4246// 4247// Swift renames NEON registers with 64-bit granularity. That means any 4248// instruction writing an S-reg implicitly reads the containing D-reg. The 4249// problem is mostly avoided by translating f32 operations to v2f32 operations 4250// on D-registers, but f32 loads are still a problem. 4251// 4252// These instructions can load an f32 into a NEON register: 4253// 4254// VLDRS - Only writes S, partial D update. 4255// VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops. 4256// VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops. 4257// 4258// FCONSTD can be used as a dependency-breaking instruction. 4259unsigned ARMBaseInstrInfo:: 4260getPartialRegUpdateClearance(const MachineInstr *MI, 4261 unsigned OpNum, 4262 const TargetRegisterInfo *TRI) const { 4263 if (!SwiftPartialUpdateClearance || 4264 !(Subtarget.isSwift() || Subtarget.isCortexA15())) 4265 return 0; 4266 4267 assert(TRI && "Need TRI instance"); 4268 4269 const MachineOperand &MO = MI->getOperand(OpNum); 4270 if (MO.readsReg()) 4271 return 0; 4272 unsigned Reg = MO.getReg(); 4273 int UseOp = -1; 4274 4275 switch (MI->getOpcode()) { 4276 // Normal instructions writing only an S-register. 4277 case ARM::VLDRS: 4278 case ARM::FCONSTS: 4279 case ARM::VMOVSR: 4280 case ARM::VMOVv8i8: 4281 case ARM::VMOVv4i16: 4282 case ARM::VMOVv2i32: 4283 case ARM::VMOVv2f32: 4284 case ARM::VMOVv1i64: 4285 UseOp = MI->findRegisterUseOperandIdx(Reg, false, TRI); 4286 break; 4287 4288 // Explicitly reads the dependency. 4289 case ARM::VLD1LNd32: 4290 UseOp = 3; 4291 break; 4292 default: 4293 return 0; 4294 } 4295 4296 // If this instruction actually reads a value from Reg, there is no unwanted 4297 // dependency. 4298 if (UseOp != -1 && MI->getOperand(UseOp).readsReg()) 4299 return 0; 4300 4301 // We must be able to clobber the whole D-reg. 4302 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 4303 // Virtual register must be a foo:ssub_0<def,undef> operand. 4304 if (!MO.getSubReg() || MI->readsVirtualRegister(Reg)) 4305 return 0; 4306 } else if (ARM::SPRRegClass.contains(Reg)) { 4307 // Physical register: MI must define the full D-reg. 4308 unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0, 4309 &ARM::DPRRegClass); 4310 if (!DReg || !MI->definesRegister(DReg, TRI)) 4311 return 0; 4312 } 4313 4314 // MI has an unwanted D-register dependency. 4315 // Avoid defs in the previous N instructions. 4316 return SwiftPartialUpdateClearance; 4317} 4318 4319// Break a partial register dependency after getPartialRegUpdateClearance 4320// returned non-zero.
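// [Editor's note, illustrative -- not part of this revision.] "Breaking" the
// dependency means planting a full-width write of the D-register immediately
// before MI, so the renamer sees a fresh 64-bit destination rather than a
// merge with a stale upper lane:
//
//   vmov.f64 d0, #0.5   ; FCONSTD (imm 96 encodes +0.5), full def of d0
//   vldr     s0, [r0]   ; the partial write of d0 no longer stalls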
4321void ARMBaseInstrInfo:: 4322breakPartialRegDependency(MachineBasicBlock::iterator MI, 4323 unsigned OpNum, 4324 const TargetRegisterInfo *TRI) const { 4325 assert(MI && OpNum < MI->getDesc().getNumDefs() && "OpNum is not a def"); 4326 assert(TRI && "Need TRI instance"); 4327 4328 const MachineOperand &MO = MI->getOperand(OpNum); 4329 unsigned Reg = MO.getReg(); 4330 assert(TargetRegisterInfo::isPhysicalRegister(Reg) && 4331 "Can't break virtual register dependencies."); 4332 unsigned DReg = Reg; 4333 4334 // If MI defines an S-reg, find the corresponding D super-register. 4335 if (ARM::SPRRegClass.contains(Reg)) { 4336 DReg = ARM::D0 + (Reg - ARM::S0) / 2; 4337 assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken"); 4338 } 4339 4340 assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps"); 4341 assert(MI->definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg"); 4342 4343 // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines 4344 // the full D-register by loading the same value to both lanes. The 4345 // instruction is micro-coded with 2 uops, so don't do this until we can 4346 // properly schedule micro-coded instructions. The dispatcher stalls cause 4347 // regressions that are too big. 4348 4349 // Insert the dependency-breaking FCONSTD before MI. 4350 // 96 is the encoding of 0.5, but the actual value doesn't matter here. 4351 AddDefaultPred(BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), 4352 get(ARM::FCONSTD), DReg).addImm(96)); 4353 MI->addRegisterKilled(DReg, TRI, true); 4354} 4355 4356bool ARMBaseInstrInfo::hasNOP() const { 4357 return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0; 4358} 4359 4360bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const { 4361 if (MI->getNumOperands() < 4) 4362 return true; 4363 unsigned ShOpVal = MI->getOperand(3).getImm(); 4364 unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal); 4365 // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1. 4366 if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) || 4367 ((ShImm == 1 || ShImm == 2) && 4368 ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl)) 4369 return true; 4370 4371 return false; 4372}
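// [Editor's appendix, illustrative only -- not part of this revision.] Per the
// comment in isSwiftFastImmShift above, Swift only fast-paths lsl #1, lsl #2,
// and lsr #1 in an immediate-shifted register operand, e.g.
//
//   add r0, r1, r2, lsl #1   ; fast
//   add r0, r1, r2, lsl #2   ; fast
//   add r0, r1, r2, lsr #1   ; fast
//   add r0, r1, r2, lsl #3   ; not a fast shift
//
// Instructions with fewer than four operands (no shifter operand to inspect)
// are conservatively reported as fast.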