ARMBaseInstrInfo.cpp revision fce711cb65716f86b4e150f42cbb597bbecf7dbe
//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMConstantPoolValue.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/STLExtras.h"

#define GET_INSTRINFO_CTOR
#include "ARMGenInstrInfo.inc"

using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

static cl::opt<bool>
WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
           cl::desc("Widen ARM vmovs to vmovd when possible"));
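
// For example, VMLAS (Sd += Sn * Sm) can be expanded per the table below into
// a VMULS that multiplies into a scratch register followed by a VADDS that
// accumulates, trading one extra instruction for avoiding an MLx pipeline
// hazard.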
/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  unsigned MLxOpc;     // MLA / MLS opcode
  unsigned MulOpc;     // Expanded multiplication opcode
  unsigned AddSubOpc;  // Expanded add / sub opcode
  bool NegAcc;         // True if the acc is negated before the add / sub.
  bool HasLane;        // True if instruction has an extra "lane" operand.
};

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,        MulOpc,        AddSubOpc,   NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,     ARM::VMULS,    ARM::VADDS,  false,  false },
  { ARM::VMLSS,     ARM::VMULS,    ARM::VSUBS,  false,  false },
  { ARM::VMLAD,     ARM::VMULD,    ARM::VADDD,  false,  false },
  { ARM::VMLSD,     ARM::VMULD,    ARM::VSUBD,  false,  false },
  { ARM::VNMLAS,    ARM::VNMULS,   ARM::VSUBS,  true,   false },
  { ARM::VNMLSS,    ARM::VMULS,    ARM::VSUBS,  true,   false },
  { ARM::VNMLAD,    ARM::VNMULD,   ARM::VSUBD,  true,   false },
  { ARM::VNMLSD,    ARM::VMULD,    ARM::VSUBD,  true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,    ARM::VMULfd,   ARM::VADDfd, false,  false },
  { ARM::VMLSfd,    ARM::VMULfd,   ARM::VSUBfd, false,  false },
  { ARM::VMLAfq,    ARM::VMULfq,   ARM::VADDfq, false,  false },
  { ARM::VMLSfq,    ARM::VMULfq,   ARM::VSUBfq, false,  false },
  { ARM::VMLAslfd,  ARM::VMULslfd, ARM::VADDfd, false,  true  },
  { ARM::VMLSslfd,  ARM::VMULslfd, ARM::VSUBfd, false,  true  },
  { ARM::VMLAslfq,  ARM::VMULslfq, ARM::VADDfq, false,  true  },
  { ARM::VMLSslfq,  ARM::VMULslfq, ARM::VSUBfq, false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      assert(false && "Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II = TM->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2())
    return (ScheduleHazardRecognizer *)
      new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG);
  return TargetInstrInfoImpl::CreateTargetPostRAHazardRecognizer(II, DAG);
}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  uint64_t TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
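  // For example, a pre-indexed "ldr r0, [r1, #4]!" becomes "add r1, r1, #4"
  // followed by "ldr r0, [r1]", while a post-indexed "ldr r0, [r1], #4"
  // becomes "ldr r0, [r1]" followed by "add r1, r1, #4".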
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI->mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator left is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them
  // for correctness of Thumb constant islands.
  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
       isIndirectBranchOpcode(SecondLastOpc)) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0);
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
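  // For example, with Cond = { ARMCC::EQ, CPSR } this emits a "beq TBB"
  // followed by an unconditional branch to FBB.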
  BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
  if (MI->isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
        (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  if (!MI->isPredicable())
    return false;

  if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
    ARMFunctionInfo *AFI =
      MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
    return AFI->isThumb2Function();
  }
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  // If this machine instr is an inline asm, measure it.
  if (MI->getOpcode() == ARM::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
  if (MI->isLabel())
    return 0;
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::PROLOG_LABEL:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI->getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::BR_JTr:
  case ARM::BR_JTm:
  case ARM::BR_JTadd:
  case ARM::tBR_JTr:
  case ARM::t2BR_JT:
  case ARM::t2TBB_JT:
  case ARM::t2TBH_JT: {
    // These are jumptable branches, i.e. a branch followed by an inlined
    // jumptable. The size is 4 + 4 * number of entries. For TBB, each entry
    // is one byte; for TBH, each entry is two bytes.
    unsigned EntrySize = (Opc == ARM::t2TBB_JT)
      ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
    unsigned NumOps = MCID.getNumOperands();
    MachineOperand JTOP =
      MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
    unsigned JTI = JTOP.getIndex();
    const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
    assert(MJTI != 0);
    const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
    assert(JTI < JT.size());
    // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
    // aligned. The assembler / linker may add 2 byte padding just before
    // the JT entries. The size does not include this padding; the
    // constant islands pass does separate bookkeeping for it.
    // FIXME: If we know the size of the function is less than (1 << 16) * 2
    // bytes, we can use 16-bit entries instead. Then there won't be an
    // alignment issue.
    unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
    unsigned NumEntries = getNumJTEntries(JT, JTI);
    if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
      // Make sure the instruction that follows TBB is 2-byte aligned.
      // FIXME: Constant island pass should insert an "ALIGN" instruction
      // instead.
      ++NumEntries;
    return NumEntries * EntrySize + InstSize;
  }
  default:
    // Otherwise, pseudo-instruction sizes are zero.
    return 0;
  }
}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI;
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += GetInstSizeInBytes(&*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc  = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                  .addReg(SrcReg, getKillRegState(KillSrc))));
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc  = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    AddDefaultPred(MIB);
    return;
  }

  // Generate instructions for VMOVQQ and VMOVQQQQ pseudos in place.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg) ||
      ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    assert(ARM::qsub_0 + 3 == ARM::qsub_3 && "Expected contiguous enum.");
    unsigned EndSubReg = ARM::QQPRRegClass.contains(DestReg, SrcReg) ?
      ARM::qsub_1 : ARM::qsub_3;
    for (unsigned i = ARM::qsub_0, e = EndSubReg + 1; i != e; ++i) {
      unsigned Dst = TRI->getSubReg(DestReg, i);
      unsigned Src = TRI->getSubReg(SrcReg, i);
      MachineInstrBuilder Mov =
        AddDefaultPred(BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VORRq))
                         .addReg(Dst, RegState::Define)
                         .addReg(Src, getKillRegState(KillSrc))
                         .addReg(Src, getKillRegState(KillSrc)));
      if (i == EndSubReg) {
        Mov->addRegisterDefined(DestReg, TRI);
        if (KillSrc)
          Mov->addRegisterKilled(SrcReg, TRI);
      }
    }
    return;
  }
  llvm_unreachable("Impossible reg-to-reg copy");
}

static const
MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB,
                             unsigned Reg, unsigned SubIdx, unsigned State,
                             const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOStore,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::QPRRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64Pseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
              AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
            AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}

unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64Pseudo:
    if (MI->getOperand(0).isFI() &&
        MI->getOperand(2).getSubReg() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
}
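
/// loadRegFromStackSlot - Reload DestReg of register class RC from frame
/// index FI, selecting LDR / VLDR / VLD1 / VLDM variants by register class
/// size, with the aligned NEON forms used only when the stack can be
/// realigned.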
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOLoad,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));

    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::QPRRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64Pseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown regclass!");
  }
}
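
/// isLoadFromStackSlot - If MI is a direct load from a stack slot, return the
/// register it defines and set FrameIndex to the loaded slot; return 0 for
/// anything this conservative matcher does not recognize.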
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64Pseudo:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}

bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
  if (!WidenVMOVS || !MI->isCopy())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  unsigned DstRegS = MI->getOperand(0).getReg();
  unsigned SrcRegS = MI->getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI->getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  DEBUG(dbgs() << "widening: " << *MI);

  // Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
  int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD);
  if (ImpDefIdx != -1)
    MI->RemoveOperand(ImpDefIdx);

  // Change the opcode and operands.
  MI->setDesc(get(ARM::VMOVD));
  MI->getOperand(0).setReg(DstRegD);
  MI->getOperand(1).setReg(SrcRegD);
  AddDefaultPred(MachineInstrBuilder(MI));

  // We are now reading SrcRegD instead of SrcRegS.  This may upset the
  // register scavenger and machine verifier, so we need to indicate that we
  // are reading an undefined value from SrcRegD, but a proper value from
  // SrcRegS.
  MI->getOperand(1).setIsUndef();
  MachineInstrBuilder(MI).addReg(SrcRegS, RegState::Implicit);

  // SrcRegD may actually contain an unrelated value in the ssub_1
  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
  if (MI->getOperand(1).isKill()) {
    MI->getOperand(1).setIsKill(false);
    MI->addRegisterKilled(SrcRegS, TRI, true);
  }

  DEBUG(dbgs() << "replaced by: " << *MI);
  return true;
}

MachineInstr*
ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
                                           const MDNode *MDPtr,
                                           DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE))
    .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
  return &*MIB;
}

/// Create a copy of a const pool value. Update CPI to the new index and
/// return the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = 0;
  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
  if (ACPV->isGlobalValue())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
             ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = ARMConstantPoolSymbol::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
             ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId,
                                             ARMCP::CPLSDA, 4);
  else if (ACPV->isMachineBasicBlock())
    NewCPV = ARMConstantPoolMBB::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}

void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }
}

MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
  switch(Orig->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
    break;
  }
  }
  return MI;
}
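
/// produceSameValue - Return true if the two instructions are known to define
/// the same value, comparing ARM constant-pool entries and PIC loads by their
/// contents rather than requiring operand-for-operand identity.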
bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1,
                                        const MachineRegisterInfo *MRI) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::MOV_ga_dyn ||
      Opcode == ARM::MOV_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_dyn ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::MOV_ga_dyn ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_dyn ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
        static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
        static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    unsigned Addr0 = MI0->getOperand(1).getReg();
    unsigned Addr1 = MI1->getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI ||
          !TargetRegisterInfo::isVirtualRegister(Addr0) ||
          !TargetRegisterInfo::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded values, e.g. a constantpool entry or a global
      // address, are the same.
      if (!produceSameValue(Def0, Def1, MRI))
        return false;
    }

    for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
      // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
      const MachineOperand &MO0 = MI0->getOperand(i);
      const MachineOperand &MO1 = MI1->getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only differences
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1, int64_t Offset2,
                                               unsigned NumLoads) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  assert(Offset2 > Offset1);

  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
    return false;  // FIXME: overly conservative?

  // Four loads in a row should be sufficient.
  if (NumLoads >= 3)
    return false;

  return true;
}
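
/// isSchedulingBoundary - Test whether MI must not be moved across by the
/// schedulers; here that means terminators, labels, the start of an IT block,
/// and stack-pointer writes, with dbg_value instructions explicitly exempted.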
bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI->isDebugValue())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isLabel())
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any dbg_value instructions.
  while (++I != MBB->end() && I->isDebugValue())
    ;
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  // Calls don't actually change the stack pointer, even if they have imp-defs.
  // No ARM calling conventions change the stack pointer. (X86 calling
  // conventions sometimes do).
  if (!MI->isCall() && MI->definesRegister(ARM::SP))
    return true;

  return false;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  if (!NumCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned UnpredCost = Probability.getNumerator() * NumCycles;
  UnpredCost /= Probability.getDenominator();
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (NumCycles + ExtraPredCycles) <= UnpredCost;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FMBB,
                    unsigned FCycles, unsigned FExtra,
                    const BranchProbability &Probability) const {
  if (!TCycles || !FCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned TUnpredCost = Probability.getNumerator() * TCycles;
  TUnpredCost /= Probability.getDenominator();

  uint32_t Comp = Probability.getDenominator() - Probability.getNumerator();
  unsigned FUnpredCost = Comp * FCycles;
  FUnpredCost /= Probability.getDenominator();

  unsigned UnpredCost = TUnpredCost + FUnpredCost;
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost;
}
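
// As a worked example of the cost model above (with assumed numbers): for a
// single block with NumCycles = 2, ExtraPredCycles = 1, a taken probability
// of 1/2, and a misprediction penalty of 10 cycles, the unpredicated cost is
// 2 * 1/2 + 1 + 10/10 = 3 cycles, so predication (2 + 1 = 3 cycles) breaks
// even and if-conversion is considered profitable.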
/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}


int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  if (Opc == ARM::tB)
    return ARM::tBcc;
  if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
}


/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether
/// the instruction is encoded with an 'S' bit is determined by the optional
/// CPSR def operand.
///
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
struct AddSubFlagsOpcodePair {
  unsigned PseudoOpc;
  unsigned MachineOpc;
};

static AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
  {ARM::ADDSri, ARM::ADDri},
  {ARM::ADDSrr, ARM::ADDrr},
  {ARM::ADDSrsi, ARM::ADDrsi},
  {ARM::ADDSrsr, ARM::ADDrsr},

  {ARM::SUBSri, ARM::SUBri},
  {ARM::SUBSrr, ARM::SUBrr},
  {ARM::SUBSrsi, ARM::SUBrsi},
  {ARM::SUBSrsr, ARM::SUBrsr},

  {ARM::RSBSri, ARM::RSBri},
  {ARM::RSBSrsi, ARM::RSBrsi},
  {ARM::RSBSrsr, ARM::RSBrsr},

  {ARM::t2ADDSri, ARM::t2ADDri},
  {ARM::t2ADDSrr, ARM::t2ADDrr},
  {ARM::t2ADDSrs, ARM::t2ADDrs},

  {ARM::t2SUBSri, ARM::t2SUBri},
  {ARM::t2SUBSrr, ARM::t2SUBrr},
  {ARM::t2SUBSrs, ARM::t2SUBrs},

  {ARM::t2RSBSri, ARM::t2RSBri},
  {ARM::t2RSBSrs, ARM::t2RSBrs},
};

unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
  static const int NPairs =
    sizeof(AddSubFlagsOpcodeMap) / sizeof(AddSubFlagsOpcodePair);
  for (AddSubFlagsOpcodePair *OpcPair = &AddSubFlagsOpcodeMap[0],
         *End = &AddSubFlagsOpcodeMap[NPairs]; OpcPair != End; ++OpcPair) {
    if (OldOpc == OpcPair->PseudoOpc) {
      return OpcPair->MachineOpc;
    }
  }
  return 0;
}
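
/// emitARMRegPlusImmediate - Emit a sequence of ADD / SUB instructions that
/// computes DestReg = BaseReg +/- NumBytes, splitting the immediate into
/// rotated 8-bit chunks. For example, an offset of 0x1001 cannot be encoded
/// as a single so_imm, so it is materialized with two chained ADDri
/// instructions (e.g. #0x1 and then #0x1000).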
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg, int NumBytes,
                               ARMCC::CondCodes Pred, unsigned PredReg,
                               const ARMBaseInstrInfo &TII, unsigned MIFlags) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
      .setMIFlags(MIFlags);
    BaseReg = DestReg;
  }
}

bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12: {
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold the address computation if the opcode has offset bits.
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12: {
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold the address computation if the opcode has offset bits.
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        // FIXME: When addrmode2 goes away, this will simplify (like the
        // T2 version), as the LDR.i12 versions don't need the encoding
        // tricks for the offset value.
        if (isSub) {
          if (AddrMode == ARMII::AddrMode_i12)
            ImmedOffset = -ImmedOffset;
          else
            ImmedOffset |= 1 << NumBits;
        }
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub) {
        if (AddrMode == ARMII::AddrMode_i12)
          ImmedOffset = -ImmedOffset;
        else
          ImmedOffset |= 1 << NumBits;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

bool ARMBaseInstrInfo::
AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpMask,
               int &CmpValue) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::CMPri:
  case ARM::t2CMPri:
    SrcReg = MI->getOperand(0).getReg();
    CmpMask = ~0;
    CmpValue = MI->getOperand(1).getImm();
    return true;
  case ARM::TSTri:
  case ARM::t2TSTri:
    SrcReg = MI->getOperand(0).getReg();
    CmpMask = MI->getOperand(1).getImm();
    CmpValue = 0;
    return true;
  }

  return false;
}

/// isSuitableForMask - Identify a suitable 'and' instruction that
/// operates on the given source register and applies the same mask
/// as a 'tst' instruction. Provide a limited look-through for copies.
/// When successful, MI will hold the found instruction.
static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
                              int CmpMask, bool CommonUse) {
  switch (MI->getOpcode()) {
  case ARM::ANDri:
  case ARM::t2ANDri:
    if (CmpMask != MI->getOperand(2).getImm())
      return false;
    if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
      return true;
    break;
  case ARM::COPY: {
    // Walk down one instruction which is potentially an 'and'.
    const MachineInstr &Copy = *MI;
    MachineBasicBlock::iterator AND(
      llvm::next(MachineBasicBlock::iterator(MI)));
    if (AND == MI->getParent()->end()) return false;
    MI = AND;
    return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
                             CmpMask, true);
  }
  }

  return false;
}

/// OptimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool ARMBaseInstrInfo::
OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
                     int CmpValue, const MachineRegisterInfo *MRI) const {
  if (CmpValue != 0)
    return false;

  MachineRegisterInfo::def_iterator DI = MRI->def_begin(SrcReg);
  if (llvm::next(DI) != MRI->def_end())
    // Only support one definition.
    return false;

  MachineInstr *MI = &*DI;

  // Masked compares sometimes use the same register as the corresponding
  // 'and'.
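  // For example, "tst r2, #255" may be paired with "and r1, r2, #255": the
  // AND applies the identical mask to the same source register, so folding
  // the compare means converting that AND into an ANDS.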
  if (CmpMask != ~0) {
    if (!isSuitableForMask(MI, SrcReg, CmpMask, false)) {
      MI = 0;
      for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
           UE = MRI->use_end(); UI != UE; ++UI) {
        if (UI->getParent() != CmpInstr->getParent()) continue;
        MachineInstr *PotentialAND = &*UI;
        if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true))
          continue;
        MI = PotentialAND;
        break;
      }
      if (!MI) return false;
    }
  }

  // Conservatively refuse to convert an instruction which isn't in the same
  // BB as the comparison.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that CPSR isn't set between the comparison instruction and the one
  // we want to change.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = MI->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B) return false;

  --I;
  for (; I != E; --I) {
    const MachineInstr &Instr = *I;

    for (unsigned IO = 0, EO = Instr.getNumOperands(); IO != EO; ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR))
        return false;
      if (!MO.isReg()) continue;

      // This instruction modifies or uses CPSR after the one we want to
      // change. We can't do this transformation.
      if (MO.getReg() == ARM::CPSR)
        return false;
    }

    if (I == B)
      // The 'and' is below the comparison instruction.
      return false;
  }

  // Set the "zero" bit in CPSR.
  switch (MI->getOpcode()) {
  default: break;
  case ARM::RSBrr:
  case ARM::RSBri:
  case ARM::RSCrr:
  case ARM::RSCri:
  case ARM::ADDrr:
  case ARM::ADDri:
  case ARM::ADCrr:
  case ARM::ADCri:
  case ARM::SUBrr:
  case ARM::SUBri:
  case ARM::SBCrr:
  case ARM::SBCri:
  case ARM::t2RSBri:
  case ARM::t2ADDrr:
  case ARM::t2ADDri:
  case ARM::t2ADCrr:
  case ARM::t2ADCri:
  case ARM::t2SUBrr:
  case ARM::t2SUBri:
  case ARM::t2SBCrr:
  case ARM::t2SBCri:
  case ARM::ANDrr:
  case ARM::ANDri:
  case ARM::t2ANDrr:
  case ARM::t2ANDri:
  case ARM::ORRrr:
  case ARM::ORRri:
  case ARM::t2ORRrr:
  case ARM::t2ORRri:
  case ARM::EORrr:
  case ARM::EORri:
  case ARM::t2EORrr:
  case ARM::t2EORri: {
    // Scan forward for uses of CPSR. If a use is a condition code that
    // requires checking the V bit, this is not safe to do. If we can't find
    // the CPSR use at all (i.e. it is used in another block), it's also not
    // safe to perform the optimization.
    bool isSafe = false;
    I = CmpInstr;
    E = MI->getParent()->end();
    while (!isSafe && ++I != E) {
      const MachineInstr &Instr = *I;
      for (unsigned IO = 0, EO = Instr.getNumOperands();
           !isSafe && IO != EO; ++IO) {
        const MachineOperand &MO = Instr.getOperand(IO);
        if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
          isSafe = true;
          break;
        }
        if (!MO.isReg() || MO.getReg() != ARM::CPSR)
          continue;
        if (MO.isDef()) {
          isSafe = true;
          break;
        }
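        // Rationale for the checks below: a compare against zero can never
        // set the V bit (subtracting zero cannot overflow), but the
        // S-suffixed instruction we would substitute either computes V from
        // its own operands (adds / subs) or leaves it unchanged (logical
        // ops). Any reader of a V-dependent condition code could therefore
        // observe different flags, so we must bail out.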
        // Condition code is after the operand before CPSR.
        ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm();
        switch (CC) {
        default:
          isSafe = true;
          break;
        case ARMCC::VS:
        case ARMCC::VC:
        case ARMCC::GE:
        case ARMCC::LT:
        case ARMCC::GT:
        case ARMCC::LE:
          return false;
        }
      }
    }

    if (!isSafe)
      return false;

    // Toggle the optional operand to CPSR.
    MI->getOperand(5).setReg(ARM::CPSR);
    MI->getOperand(5).setIsDef(true);
    CmpInstr->eraseFromParent();
    return true;
  }
  }

  return false;
}

bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
                                     MachineInstr *DefMI, unsigned Reg,
                                     MachineRegisterInfo *MRI) const {
  // Fold large immediates into add, sub, or, xor.
  unsigned DefOpc = DefMI->getOpcode();
  if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
    return false;
  if (!DefMI->getOperand(1).isImm())
    // Could be t2MOVi32imm <ga:xx>
    return false;

  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  unsigned UseOpc = UseMI->getOpcode();
  unsigned NewUseOpc = 0;
  uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
  uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
  bool Commute = false;
  switch (UseOpc) {
  default: return false;
  case ARM::SUBrr:
  case ARM::ADDrr:
  case ARM::ORRrr:
  case ARM::EORrr:
  case ARM::t2SUBrr:
  case ARM::t2ADDrr:
  case ARM::t2ORRrr:
  case ARM::t2EORrr: {
    Commute = UseMI->getOperand(2).getReg() != Reg;
    switch (UseOpc) {
    default: break;
    case ARM::SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::SUBri;
      // Fallthrough
    }
    case ARM::ADDrr:
    case ARM::ORRrr:
    case ARM::EORrr: {
      if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::ADDrr: NewUseOpc = ARM::ADDri; break;
      case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
      case ARM::EORrr: NewUseOpc = ARM::EORri; break;
      }
      break;
    }
    case ARM::t2SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::t2SUBri;
      // Fallthrough
    }
    case ARM::t2ADDrr:
    case ARM::t2ORRrr:
    case ARM::t2EORrr: {
      if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break;
      case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
      case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
      }
      break;
    }
    }
  }
  }

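  // At this point the 32-bit immediate has been split into two encodable
  // halves, SOImmValV1 and SOImmValV2. Emit a new instruction applying the
  // first half to the register operand, then rewrite UseMI in place to apply
  // the second half to that temporary. Illustrative example (registers
  // invented for exposition): "mov r1, #0x10001; add r0, r2, r1" becomes a
  // pair of immediate adds, one adding #0x1 and the other #0x10000.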
  unsigned OpIdx = Commute ? 2 : 1;
  unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
  bool isKill = UseMI->getOperand(OpIdx).isKill();
  unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
  AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
                                      UseMI, UseMI->getDebugLoc(),
                                      get(NewUseOpc), NewReg)
                              .addReg(Reg1, getKillRegState(isKill))
                              .addImm(SOImmValV1)));
  UseMI->setDesc(get(NewUseOpc));
  UseMI->getOperand(1).setReg(NewReg);
  UseMI->getOperand(1).setIsKill();
  UseMI->getOperand(2).ChangeToImmediate(SOImmValV2);
  DefMI->eraseFromParent();
  return true;
}

unsigned
ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                 const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  const MCInstrDesc &Desc = MI->getDesc();
  unsigned Class = Desc.getSchedClass();
  unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps)
    return UOps;

  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected multi-uops instruction!");
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;

  // The number of uOps for load / store multiple is determined by the number
  // of registers.
  //
  // On Cortex-A8, each pair of register loads / stores can be scheduled on
  // the same cycle. The scheduling for the first load / store must be done
  // separately by assuming the address is not 64-bit aligned.
  //
  // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the
  // address is not 64-bit aligned, then the AGU takes an extra cycle. For
  // VFP / NEON load / store multiple, the formula is
  // (#reg / 2) + (#reg % 2) + 1.
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
    return (NumRegs / 2) + (NumRegs % 2) + 1;
  }

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
    if (Subtarget.isCortexA8()) {
      if (NumRegs < 4)
        return 2;
      // 4 registers would be issued: 2, 2.
      // 5 registers would be issued: 2, 2, 1.
      UOps = (NumRegs / 2);
      if (NumRegs % 2)
        ++UOps;
      return UOps;
    } else if (Subtarget.isCortexA9()) {
      UOps = (NumRegs / 2);
      // If there is an odd number of registers or if it's not 64-bit aligned,
      // then it takes an extra AGU (Address Generation Unit) cycle.
      if ((NumRegs % 2) ||
          !MI->hasOneMemOperand() ||
          (*MI->memoperands_begin())->getAlignment() < 8)
        ++UOps;
      return UOps;
    } else {
      // Assume the worst.
      return NumRegs;
    }
  }
  }
}

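// For the *DefCycle / *UseCycle helpers below, RegNo is the position of the
// queried operand within the instruction's variable-length register list,
// derived from the number of fixed operands in the MCInstrDesc. A
// non-positive RegNo means the operand is one of the fixed operands (e.g.
// the address writeback) rather than a member of the register list.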
int
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &DefMCID,
                                  unsigned DefClass,
                                  unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
    DefCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++DefCycle;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = RegNo;
    bool isSLoad = false;

    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLDMSIA:
    case ARM::VLDMSIA_UPD:
    case ARM::VLDMSDB_UPD:
      isSLoad = true;
      break;
    }

    // If there is an odd number of 'S' registers or if it's not 64-bit
    // aligned, then it takes an extra cycle.
    if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
      ++DefCycle;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}

int
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &DefMCID,
                                 unsigned DefClass,
                                 unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // 4 registers would be issued: 1, 2, 1.
    // 5 registers would be issued: 1, 2, 2.
    DefCycle = RegNo / 2;
    if (DefCycle < 1)
      DefCycle = 1;
    // Result latency is issue cycle + 2: E2.
    DefCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = (RegNo / 2);
    // If there is an odd number of registers or if it's not 64-bit aligned,
    // then it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || DefAlign < 8)
      ++DefCycle;
    // Result latency is AGU cycles + 2.
    DefCycle += 2;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}

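// The store counterparts below mirror the load helpers above: a store
// multiple reads its source registers over several cycles, so the cycle at
// which a given register is read depends on its position in the register
// list.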
int
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &UseMCID,
                                  unsigned UseClass,
                                  unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
    UseCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++UseCycle;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = RegNo;
    bool isSStore = false;

    switch (UseMCID.getOpcode()) {
    default: break;
    case ARM::VSTMSIA:
    case ARM::VSTMSIA_UPD:
    case ARM::VSTMSDB_UPD:
      isSStore = true;
      break;
    }

    // If there is an odd number of 'S' registers or if it's not 64-bit
    // aligned, then it takes an extra cycle.
    if ((isSStore && (RegNo % 2)) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = RegNo + 2;
  }

  return UseCycle;
}

int
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &UseMCID,
                                 unsigned UseClass,
                                 unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    UseCycle = RegNo / 2;
    if (UseCycle < 2)
      UseCycle = 2;
    // Read in E3.
    UseCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = (RegNo / 2);
    // If there is an odd number of registers or if it's not 64-bit aligned,
    // then it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = 1;
  }
  return UseCycle;
}

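// getOperandLatency combines the def and use cycles computed above: if the
// def completes at cycle DefCycle and the use reads its operand at cycle
// UseCycle, the operand latency is DefCycle - UseCycle + 1, reduced by one
// more when the itinerary says the producing stage forwards its result
// directly to the consuming stage.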
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MCInstrDesc &DefMCID,
                                    unsigned DefIdx, unsigned DefAlign,
                                    const MCInstrDesc &UseMCID,
                                    unsigned UseIdx, unsigned UseAlign) const {
  unsigned DefClass = DefMCID.getSchedClass();
  unsigned UseClass = UseMCID.getSchedClass();

  if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
    return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);

  // This may be a def / use of a variable_ops instruction, in which case the
  // operand latency might be determinable dynamically. Let the target try to
  // figure it out.
  int DefCycle = -1;
  bool LdmBypass = false;
  switch (DefMCID.getOpcode()) {
  default:
    DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    break;

  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
    DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
    LdmBypass = true;
    DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;
  }

  if (DefCycle == -1)
    // We can't seem to determine the result latency of the def, assume it's 2.
    DefCycle = 2;

  int UseCycle = -1;
  switch (UseMCID.getOpcode()) {
  default:
    UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
    break;

  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD:
    UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;

  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;
  }

  if (UseCycle == -1)
    // Assume it's read in the first stage.
    UseCycle = 1;

  UseCycle = DefCycle - UseCycle + 1;
  if (UseCycle > 0) {
    if (LdmBypass) {
      // It's a variable_ops instruction so we can't use DefIdx here. Just use
      // the first def operand.
      if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
                                          UseClass, UseIdx))
        --UseCycle;
    } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
                                               UseClass, UseIdx)) {
      --UseCycle;
    }
  }

  return UseCycle;
}

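// Latency queries on bundles are redirected to the relevant instruction
// inside the bundle. The helpers below walk a bundle looking for the
// sub-instruction that defines (or uses) a given register, and also report
// how many bundled instructions were walked past so the caller can adjust
// the computed latency accordingly.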
static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI,
                                           unsigned Reg, unsigned &DefIdx,
                                           unsigned &Dist) {
  Dist = 0;

  MachineBasicBlock::const_iterator I = MI; ++I;
  MachineBasicBlock::const_instr_iterator II =
    llvm::prior(I.getInstrIterator());
  assert(II->isInsideBundle() && "Empty bundle?");

  int Idx = -1;
  while (II->isInsideBundle()) {
    Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
    if (Idx != -1)
      break;
    --II;
    ++Dist;
  }

  assert(Idx != -1 && "Cannot find bundled definition!");
  DefIdx = Idx;
  return II;
}

static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI,
                                           unsigned Reg, unsigned &UseIdx,
                                           unsigned &Dist) {
  Dist = 0;

  MachineBasicBlock::const_instr_iterator II = MI; ++II;
  assert(II->isInsideBundle() && "Empty bundle?");
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();

  // FIXME: This doesn't properly handle multiple uses.
  int Idx = -1;
  while (II != E && II->isInsideBundle()) {
    Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
    if (Idx != -1)
      break;
    if (II->getOpcode() != ARM::t2IT)
      ++Dist;
    ++II;
  }

  if (Idx == -1) {
    Dist = 0;
    return 0;
  }

  UseIdx = Idx;
  return II;
}

int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MachineInstr *DefMI, unsigned DefIdx,
                                    const MachineInstr *UseMI,
                                    unsigned UseIdx) const {
  if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
      DefMI->isRegSequence() || DefMI->isImplicitDef())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return DefMI->mayLoad() ? 3 : 1;

  const MCInstrDesc *DefMCID = &DefMI->getDesc();
  const MCInstrDesc *UseMCID = &UseMI->getDesc();
  const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
  unsigned Reg = DefMO.getReg();
  if (Reg == ARM::CPSR) {
    if (DefMI->getOpcode() == ARM::FMSTAT) {
      // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
      return Subtarget.isCortexA9() ? 1 : 20;
    }

    // CPSR set and branch can be paired in the same cycle.
    if (UseMI->isBranch())
      return 0;

    // Otherwise it takes the instruction latency (generally one).
    int Latency = getInstrLatency(ItinData, DefMI);

    // For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to
    // its uses. Instructions which are otherwise scheduled between them may
    // incur a code size penalty (not able to use the CPSR setting 16-bit
    // instructions).
    if (Latency > 0 && Subtarget.isThumb2()) {
      const MachineFunction *MF = DefMI->getParent()->getParent();
      if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
        --Latency;
    }
    return Latency;
  }

  unsigned DefAlign = DefMI->hasOneMemOperand()
    ? (*DefMI->memoperands_begin())->getAlignment() : 0;
  unsigned UseAlign = UseMI->hasOneMemOperand()
    ? (*UseMI->memoperands_begin())->getAlignment() : 0;
  unsigned DefAdj = 0;
  if (DefMI->isBundle()) {
    DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
    if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
        DefMI->isRegSequence() || DefMI->isImplicitDef())
      return 1;
    DefMCID = &DefMI->getDesc();
  }
  unsigned UseAdj = 0;
  if (UseMI->isBundle()) {
    unsigned NewUseIdx;
    const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
                                                   Reg, NewUseIdx, UseAdj);
    if (NewUseMI) {
      UseMI = NewUseMI;
      UseIdx = NewUseIdx;
      UseMCID = &UseMI->getDesc();
    }
  }

  int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
                                  *UseMCID, UseIdx, UseAlign);
  int Adj = DefAdj + UseAdj;
  if (Adj) {
    Latency -= (int)(DefAdj + UseAdj);
    if (Latency < 1)
      return 1;
  }

  if (Latency > 1 &&
      (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
    switch (DefMCID->getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      unsigned ShOpVal = DefMI->getOperand(3).getImm();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Latency;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt = DefMI->getOperand(3).getImm();
      if (ShAmt == 0 || ShAmt == 2)
        --Latency;
      break;
    }
    }
  }

  if (DefAlign < 8 && Subtarget.isCortexA9())
    switch (DefMCID->getOpcode()) {
    default: break;
    case ARM::VLD1q8:
    case ARM::VLD1q16:
    case ARM::VLD1q32:
    case ARM::VLD1q64:
    case ARM::VLD1q8wb_fixed:
    case ARM::VLD1q16wb_fixed:
    case ARM::VLD1q32wb_fixed:
    case ARM::VLD1q64wb_fixed:
    case ARM::VLD1q8wb_register:
    case ARM::VLD1q16wb_register:
    case ARM::VLD1q32wb_register:
    case ARM::VLD1q64wb_register:
    case ARM::VLD2d8:
    case ARM::VLD2d16:
    case ARM::VLD2d32:
    case ARM::VLD2q8:
    case ARM::VLD2q16:
    case ARM::VLD2q32:
    case ARM::VLD2d8wb_fixed:
    case ARM::VLD2d16wb_fixed:
    case ARM::VLD2d32wb_fixed:
    case ARM::VLD2q8wb_fixed:
    case ARM::VLD2q16wb_fixed:
    case ARM::VLD2q32wb_fixed:
    case ARM::VLD2d8wb_register:
    case ARM::VLD2d16wb_register:
    case ARM::VLD2d32wb_register:
    case ARM::VLD2q8wb_register:
    case ARM::VLD2q16wb_register:
    case ARM::VLD2q32wb_register:
    case ARM::VLD3d8:
    case ARM::VLD3d16:
    case ARM::VLD3d32:
    case ARM::VLD1d64T:
    case ARM::VLD3d8_UPD:
    case ARM::VLD3d16_UPD:
    case ARM::VLD3d32_UPD:
    case ARM::VLD1d64Twb_fixed:
    case ARM::VLD1d64Twb_register:
    case ARM::VLD3q8_UPD:
    case ARM::VLD3q16_UPD:
    case ARM::VLD3q32_UPD:
    case ARM::VLD4d8:
    case ARM::VLD4d16:
    case ARM::VLD4d32:
    case ARM::VLD1d64Q:
    case ARM::VLD4d8_UPD:
    case ARM::VLD4d16_UPD:
    case ARM::VLD4d32_UPD:
    case ARM::VLD1d64Qwb_fixed:
    case ARM::VLD1d64Qwb_register:
    case ARM::VLD4q8_UPD:
    case ARM::VLD4q16_UPD:
    case ARM::VLD4q32_UPD:
    case ARM::VLD1DUPq8:
    case ARM::VLD1DUPq16:
    case ARM::VLD1DUPq32:
    case ARM::VLD1DUPq8wb_fixed:
    case ARM::VLD1DUPq16wb_fixed:
    case ARM::VLD1DUPq32wb_fixed:
    case ARM::VLD1DUPq8wb_register:
    case ARM::VLD1DUPq16wb_register:
    case ARM::VLD1DUPq32wb_register:
    case ARM::VLD2DUPd8:
    case ARM::VLD2DUPd16:
    case ARM::VLD2DUPd32:
    case ARM::VLD2DUPd8wb_fixed:
    case ARM::VLD2DUPd16wb_fixed:
    case ARM::VLD2DUPd32wb_fixed:
    case ARM::VLD2DUPd8wb_register:
    case ARM::VLD2DUPd16wb_register:
    case ARM::VLD2DUPd32wb_register:
    case ARM::VLD4DUPd8:
    case ARM::VLD4DUPd16:
    case ARM::VLD4DUPd32:
    case ARM::VLD4DUPd8_UPD:
    case ARM::VLD4DUPd16_UPD:
    case ARM::VLD4DUPd32_UPD:
    case ARM::VLD1LNd8:
    case ARM::VLD1LNd16:
    case ARM::VLD1LNd32:
    case ARM::VLD1LNd8_UPD:
    case ARM::VLD1LNd16_UPD:
    case ARM::VLD1LNd32_UPD:
    case ARM::VLD2LNd8:
    case ARM::VLD2LNd16:
    case ARM::VLD2LNd32:
    case ARM::VLD2LNq16:
    case ARM::VLD2LNq32:
    case ARM::VLD2LNd8_UPD:
    case ARM::VLD2LNd16_UPD:
    case ARM::VLD2LNd32_UPD:
    case ARM::VLD2LNq16_UPD:
    case ARM::VLD2LNq32_UPD:
    case ARM::VLD4LNd8:
    case ARM::VLD4LNd16:
    case ARM::VLD4LNd32:
    case ARM::VLD4LNq16:
    case ARM::VLD4LNq32:
    case ARM::VLD4LNd8_UPD:
    case ARM::VLD4LNd16_UPD:
    case ARM::VLD4LNd32_UPD:
    case ARM::VLD4LNq16_UPD:
    case ARM::VLD4LNq32_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Latency;
      break;
    }

  return Latency;
}

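// SDNode variant of getOperandLatency, used by the SelectionDAG scheduler
// before machine instructions exist. It mirrors the MachineInstr-based logic
// above, except that shifter operands and alignments are read from the DAG
// node's operands and memory operands.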
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    SDNode *DefNode, unsigned DefIdx,
                                    SDNode *UseNode, unsigned UseIdx) const {
  if (!DefNode->isMachineOpcode())
    return 1;

  const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());

  if (isZeroCost(DefMCID.Opcode))
    return 0;

  if (!ItinData || ItinData->isEmpty())
    return DefMCID.mayLoad() ? 3 : 1;

  if (!UseNode->isMachineOpcode()) {
    int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
    if (Subtarget.isCortexA9())
      return Latency <= 2 ? 1 : Latency - 1;
    else
      return Latency <= 3 ? 1 : Latency - 2;
  }

  const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
  const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
  unsigned DefAlign = !DefMN->memoperands_empty()
    ? (*DefMN->memoperands_begin())->getAlignment() : 0;
  const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
  unsigned UseAlign = !UseMN->memoperands_empty()
    ? (*UseMN->memoperands_begin())->getAlignment() : 0;
  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
                                  UseMCID, UseIdx, UseAlign);

  if (Latency > 1 &&
      (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      unsigned ShOpVal =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Latency;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      if (ShAmt == 0 || ShAmt == 2)
        --Latency;
      break;
    }
    }
  }

  if (DefAlign < 8 && Subtarget.isCortexA9())
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLD1q8Pseudo:
    case ARM::VLD1q16Pseudo:
    case ARM::VLD1q32Pseudo:
    case ARM::VLD1q64Pseudo:
    case ARM::VLD1q8PseudoWB_register:
    case ARM::VLD1q16PseudoWB_register:
    case ARM::VLD1q32PseudoWB_register:
    case ARM::VLD1q64PseudoWB_register:
    case ARM::VLD1q8PseudoWB_fixed:
    case ARM::VLD1q16PseudoWB_fixed:
    case ARM::VLD1q32PseudoWB_fixed:
    case ARM::VLD1q64PseudoWB_fixed:
    case ARM::VLD2d8Pseudo:
    case ARM::VLD2d16Pseudo:
    case ARM::VLD2d32Pseudo:
    case ARM::VLD2q8Pseudo:
    case ARM::VLD2q16Pseudo:
    case ARM::VLD2q32Pseudo:
    case ARM::VLD2d8PseudoWB_fixed:
    case ARM::VLD2d16PseudoWB_fixed:
    case ARM::VLD2d32PseudoWB_fixed:
    case ARM::VLD2q8PseudoWB_fixed:
    case ARM::VLD2q16PseudoWB_fixed:
    case ARM::VLD2q32PseudoWB_fixed:
    case ARM::VLD2d8PseudoWB_register:
    case ARM::VLD2d16PseudoWB_register:
    case ARM::VLD2d32PseudoWB_register:
    case ARM::VLD2q8PseudoWB_register:
    case ARM::VLD2q16PseudoWB_register:
    case ARM::VLD2q32PseudoWB_register:
    case ARM::VLD3d8Pseudo:
    case ARM::VLD3d16Pseudo:
    case ARM::VLD3d32Pseudo:
    case ARM::VLD1d64TPseudo:
    case ARM::VLD3d8Pseudo_UPD:
    case ARM::VLD3d16Pseudo_UPD:
    case ARM::VLD3d32Pseudo_UPD:
    case ARM::VLD3q8Pseudo_UPD:
    case ARM::VLD3q16Pseudo_UPD:
    case ARM::VLD3q32Pseudo_UPD:
    case ARM::VLD3q8oddPseudo:
    case ARM::VLD3q16oddPseudo:
    case ARM::VLD3q32oddPseudo:
    case ARM::VLD3q8oddPseudo_UPD:
    case ARM::VLD3q16oddPseudo_UPD:
    case ARM::VLD3q32oddPseudo_UPD:
    case ARM::VLD4d8Pseudo:
    case ARM::VLD4d16Pseudo:
    case ARM::VLD4d32Pseudo:
    case ARM::VLD1d64QPseudo:
    case ARM::VLD4d8Pseudo_UPD:
    case ARM::VLD4d16Pseudo_UPD:
    case ARM::VLD4d32Pseudo_UPD:
    case ARM::VLD4q8Pseudo_UPD:
    case ARM::VLD4q16Pseudo_UPD:
    case ARM::VLD4q32Pseudo_UPD:
    case ARM::VLD4q8oddPseudo:
    case ARM::VLD4q16oddPseudo:
    case ARM::VLD4q32oddPseudo:
    case ARM::VLD4q8oddPseudo_UPD:
    case ARM::VLD4q16oddPseudo_UPD:
    case ARM::VLD4q32oddPseudo_UPD:
    case ARM::VLD1DUPq8Pseudo:
    case ARM::VLD1DUPq16Pseudo:
    case ARM::VLD1DUPq32Pseudo:
    case ARM::VLD1DUPq8PseudoWB_fixed:
    case ARM::VLD1DUPq16PseudoWB_fixed:
    case ARM::VLD1DUPq32PseudoWB_fixed:
    case ARM::VLD1DUPq8PseudoWB_register:
    case ARM::VLD1DUPq16PseudoWB_register:
    case ARM::VLD1DUPq32PseudoWB_register:
    case ARM::VLD2DUPd8Pseudo:
    case ARM::VLD2DUPd16Pseudo:
    case ARM::VLD2DUPd32Pseudo:
    case ARM::VLD2DUPd8PseudoWB_fixed:
    case ARM::VLD2DUPd16PseudoWB_fixed:
    case ARM::VLD2DUPd32PseudoWB_fixed:
    case ARM::VLD2DUPd8PseudoWB_register:
    case ARM::VLD2DUPd16PseudoWB_register:
    case ARM::VLD2DUPd32PseudoWB_register:
    case ARM::VLD4DUPd8Pseudo:
    case ARM::VLD4DUPd16Pseudo:
    case ARM::VLD4DUPd32Pseudo:
    case ARM::VLD4DUPd8Pseudo_UPD:
    case ARM::VLD4DUPd16Pseudo_UPD:
    case ARM::VLD4DUPd32Pseudo_UPD:
    case ARM::VLD1LNq8Pseudo:
    case ARM::VLD1LNq16Pseudo:
    case ARM::VLD1LNq32Pseudo:
    case ARM::VLD1LNq8Pseudo_UPD:
    case ARM::VLD1LNq16Pseudo_UPD:
    case ARM::VLD1LNq32Pseudo_UPD:
    case ARM::VLD2LNd8Pseudo:
    case ARM::VLD2LNd16Pseudo:
    case ARM::VLD2LNd32Pseudo:
    case ARM::VLD2LNq16Pseudo:
    case ARM::VLD2LNq32Pseudo:
    case ARM::VLD2LNd8Pseudo_UPD:
    case ARM::VLD2LNd16Pseudo_UPD:
    case ARM::VLD2LNd32Pseudo_UPD:
    case ARM::VLD2LNq16Pseudo_UPD:
    case ARM::VLD2LNq32Pseudo_UPD:
    case ARM::VLD4LNd8Pseudo:
    case ARM::VLD4LNd16Pseudo:
    case ARM::VLD4LNd32Pseudo:
    case ARM::VLD4LNq16Pseudo:
    case ARM::VLD4LNq32Pseudo:
    case ARM::VLD4LNd8Pseudo_UPD:
    case ARM::VLD4LNd16Pseudo_UPD:
    case ARM::VLD4LNd32Pseudo_UPD:
    case ARM::VLD4LNq16Pseudo_UPD:
    case ARM::VLD4LNq32Pseudo_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Latency;
      break;
    }

  return Latency;
}

unsigned
ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData,
                                   const MachineInstr *DefMI, unsigned DefIdx,
                                   const MachineInstr *DepMI) const {
  unsigned Reg = DefMI->getOperand(DefIdx).getReg();
  if (DepMI->readsRegister(Reg, &getRegisterInfo()) || !isPredicated(DepMI))
    return 1;

  // If the second MI is predicated, then there is an implicit use dependency.
  return getOperandLatency(ItinData, DefMI, DefIdx, DepMI,
                           DepMI->getNumOperands());
}

int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr *MI,
                                      unsigned *PredCost) const {
  if (MI->isCopyLike() || MI->isInsertSubreg() ||
      MI->isRegSequence() || MI->isImplicitDef())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (MI->isBundle()) {
    int Latency = 0;
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      if (I->getOpcode() != ARM::t2IT)
        Latency += getInstrLatency(ItinData, I, PredCost);
    }
    return Latency;
  }

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned Class = MCID.getSchedClass();
  unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR)))
    // When predicated, CPSR is an additional source operand for CPSR updating
    // instructions; this apparently increases their latencies.
    *PredCost = 1;
  if (UOps)
    return ItinData->getStageLatency(Class);
  return getNumMicroOps(ItinData, MI);
}

int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      SDNode *Node) const {
  if (!Node->isMachineOpcode())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Opcode = Node->getMachineOpcode();
  switch (Opcode) {
  default:
    return ItinData->getStageLatency(get(Opcode).getSchedClass());
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;
  }
}

bool ARMBaseInstrInfo::
hasHighOperandLatency(const InstrItineraryData *ItinData,
                      const MachineRegisterInfo *MRI,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask;
  if (Subtarget.isCortexA8() &&
      (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
    // CortexA8 VFP instructions are not pipelined.
    return true;

  // Hoist VFP / NEON instructions with 4 or higher latency.
  int Latency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  if (Latency <= 3)
    return false;
  return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
         UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
}

bool ARMBaseInstrInfo::
hasLowDefLatency(const InstrItineraryData *ItinData,
                 const MachineInstr *DefMI, unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  if (DDomain == ARMII::DomainGeneral) {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    return (DefCycle != -1 && DefCycle <= 2);
  }
  return false;
}

bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
                                         StringRef &ErrInfo) const {
  if (convertAddSubFlagsOpcode(MI->getOpcode())) {
    ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
    return false;
  }
  return true;
}

bool
ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
                                     unsigned &AddSubOpc,
                                     bool &NegAcc, bool &HasLane) const {
  DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
  if (I == MLxEntryMap.end())
    return false;

  const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
  MulOpc = Entry.MulOpc;
  AddSubOpc = Entry.AddSubOpc;
  NegAcc = Entry.NegAcc;
  HasLane = Entry.HasLane;
  return true;
}

//===----------------------------------------------------------------------===//
// Execution domains.
//===----------------------------------------------------------------------===//
//
// Some instructions go down the NEON pipeline, some go down the VFP pipeline,
// and some can go down both. The vmov instructions go down the VFP pipeline,
// but they can be changed to vorr equivalents that are executed by the NEON
// pipeline.
//
// We use the following execution domain numbering:
//
enum ARMExeDomain {
  ExeGeneric = 0,
  ExeVFP = 1,
  ExeNEON = 2
};
//
// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
//
std::pair<uint16_t, uint16_t>
ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
  // VMOVD is a VFP instruction, but can be changed to NEON if it isn't
  // predicated.
  if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
    return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));

  // No other instructions can be swizzled, so just determine their domain.
  unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask;

  if (Domain & ARMII::DomainNEON)
    return std::make_pair(ExeNEON, 0);

  // Certain instructions can go either way on Cortex-A8.
  // Treat them as NEON instructions.
  if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
    return std::make_pair(ExeNEON, 0);

  if (Domain & ARMII::DomainVFP)
    return std::make_pair(ExeVFP, 0);

  return std::make_pair(ExeGeneric, 0);
}

void
ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
  // We only know how to change VMOVD into VORR.
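  // A VORR whose two source operands are both the VMOVD source is a plain
  // register copy (Dd = Dm | Dm == Dm), so the rewrite below preserves the
  // move semantics while steering the instruction down the NEON pipeline.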
  assert(MI->getOpcode() == ARM::VMOVD && "Can only swizzle VMOVD");
  if (Domain != ExeNEON)
    return;

  // Zap the predicate operands.
  assert(!isPredicated(MI) && "Cannot predicate a VORRd");
  MI->RemoveOperand(3);
  MI->RemoveOperand(2);

  // Change to a VORRd which requires two identical use operands.
  MI->setDesc(get(ARM::VORRd));

  // Add the extra source operand and new predicates.
  // This will go before any implicit ops.
  AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1)));
}

bool ARMBaseInstrInfo::hasNOP() const {
  return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0;
}