ARMBaseInstrInfo.cpp revision ed7a51e69209af87f3749d5f95740f69a1dc7711
1//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file contains the Base ARM implementation of the TargetInstrInfo class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "ARMBaseInstrInfo.h" 15#include "ARM.h" 16#include "ARMBaseRegisterInfo.h" 17#include "ARMConstantPoolValue.h" 18#include "ARMHazardRecognizer.h" 19#include "ARMMachineFunctionInfo.h" 20#include "MCTargetDesc/ARMAddressingModes.h" 21#include "llvm/Constants.h" 22#include "llvm/Function.h" 23#include "llvm/GlobalValue.h" 24#include "llvm/CodeGen/LiveVariables.h" 25#include "llvm/CodeGen/MachineConstantPool.h" 26#include "llvm/CodeGen/MachineFrameInfo.h" 27#include "llvm/CodeGen/MachineInstrBuilder.h" 28#include "llvm/CodeGen/MachineJumpTableInfo.h" 29#include "llvm/CodeGen/MachineMemOperand.h" 30#include "llvm/CodeGen/MachineRegisterInfo.h" 31#include "llvm/CodeGen/SelectionDAGNodes.h" 32#include "llvm/MC/MCAsmInfo.h" 33#include "llvm/Support/BranchProbability.h" 34#include "llvm/Support/CommandLine.h" 35#include "llvm/Support/Debug.h" 36#include "llvm/Support/ErrorHandling.h" 37#include "llvm/ADT/STLExtras.h" 38 39#define GET_INSTRINFO_CTOR 40#include "ARMGenInstrInfo.inc" 41 42using namespace llvm; 43 44static cl::opt<bool> 45EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden, 46 cl::desc("Enable ARM 2-addr to 3-addr conv")); 47 48static cl::opt<bool> 49WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true), 50 cl::desc("Widen ARM vmovs to vmovd when possible")); 51 52/// ARM_MLxEntry - Record information about MLA / MLS instructions. 53struct ARM_MLxEntry { 54 uint16_t MLxOpc; // MLA / MLS opcode 55 uint16_t MulOpc; // Expanded multiplication opcode 56 uint16_t AddSubOpc; // Expanded add / sub opcode 57 bool NegAcc; // True if the acc is negated before the add / sub. 58 bool HasLane; // True if instruction has an extra "lane" operand. 
59}; 60 61static const ARM_MLxEntry ARM_MLxTable[] = { 62 // MLxOpc, MulOpc, AddSubOpc, NegAcc, HasLane 63 // fp scalar ops 64 { ARM::VMLAS, ARM::VMULS, ARM::VADDS, false, false }, 65 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS, false, false }, 66 { ARM::VMLAD, ARM::VMULD, ARM::VADDD, false, false }, 67 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD, false, false }, 68 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS, true, false }, 69 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS, true, false }, 70 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD, true, false }, 71 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD, true, false }, 72 73 // fp SIMD ops 74 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd, false, false }, 75 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd, false, false }, 76 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq, false, false }, 77 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq, false, false }, 78 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd, false, true }, 79 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd, false, true }, 80 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq, false, true }, 81 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq, false, true }, 82}; 83 84ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI) 85 : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP), 86 Subtarget(STI) { 87 for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) { 88 if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second) 89 assert(false && "Duplicated entries?"); 90 MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc); 91 MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc); 92 } 93} 94 95// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl 96// currently defaults to no prepass hazard recognizer. 97ScheduleHazardRecognizer *ARMBaseInstrInfo:: 98CreateTargetHazardRecognizer(const TargetMachine *TM, 99 const ScheduleDAG *DAG) const { 100 if (usePreRAHazardRecognizer()) { 101 const InstrItineraryData *II = TM->getInstrItineraryData(); 102 return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched"); 103 } 104 return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG); 105} 106 107ScheduleHazardRecognizer *ARMBaseInstrInfo:: 108CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 109 const ScheduleDAG *DAG) const { 110 if (Subtarget.isThumb2() || Subtarget.hasVFP2()) 111 return (ScheduleHazardRecognizer *) 112 new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG); 113 return TargetInstrInfoImpl::CreateTargetPostRAHazardRecognizer(II, DAG); 114} 115 116MachineInstr * 117ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, 118 MachineBasicBlock::iterator &MBBI, 119 LiveVariables *LV) const { 120 // FIXME: Thumb2 support. 121 122 if (!EnableARM3Addr) 123 return NULL; 124 125 MachineInstr *MI = MBBI; 126 MachineFunction &MF = *MI->getParent()->getParent(); 127 uint64_t TSFlags = MI->getDesc().TSFlags; 128 bool isPre = false; 129 switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) { 130 default: return NULL; 131 case ARMII::IndexModePre: 132 isPre = true; 133 break; 134 case ARMII::IndexModePost: 135 break; 136 } 137 138 // Try splitting an indexed load/store to an un-indexed one plus an add/sub 139 // operation. 
140 unsigned MemOpc = getUnindexedOpcode(MI->getOpcode()); 141 if (MemOpc == 0) 142 return NULL; 143 144 MachineInstr *UpdateMI = NULL; 145 MachineInstr *MemMI = NULL; 146 unsigned AddrMode = (TSFlags & ARMII::AddrModeMask); 147 const MCInstrDesc &MCID = MI->getDesc(); 148 unsigned NumOps = MCID.getNumOperands(); 149 bool isLoad = !MI->mayStore(); 150 const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0); 151 const MachineOperand &Base = MI->getOperand(2); 152 const MachineOperand &Offset = MI->getOperand(NumOps-3); 153 unsigned WBReg = WB.getReg(); 154 unsigned BaseReg = Base.getReg(); 155 unsigned OffReg = Offset.getReg(); 156 unsigned OffImm = MI->getOperand(NumOps-2).getImm(); 157 ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm(); 158 switch (AddrMode) { 159 default: llvm_unreachable("Unknown indexed op!"); 160 case ARMII::AddrMode2: { 161 bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub; 162 unsigned Amt = ARM_AM::getAM2Offset(OffImm); 163 if (OffReg == 0) { 164 if (ARM_AM::getSOImmVal(Amt) == -1) 165 // Can't encode it in a so_imm operand. This transformation will 166 // add more than 1 instruction. Abandon! 167 return NULL; 168 UpdateMI = BuildMI(MF, MI->getDebugLoc(), 169 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg) 170 .addReg(BaseReg).addImm(Amt) 171 .addImm(Pred).addReg(0).addReg(0); 172 } else if (Amt != 0) { 173 ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm); 174 unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt); 175 UpdateMI = BuildMI(MF, MI->getDebugLoc(), 176 get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg) 177 .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc) 178 .addImm(Pred).addReg(0).addReg(0); 179 } else 180 UpdateMI = BuildMI(MF, MI->getDebugLoc(), 181 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg) 182 .addReg(BaseReg).addReg(OffReg) 183 .addImm(Pred).addReg(0).addReg(0); 184 break; 185 } 186 case ARMII::AddrMode3 : { 187 bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub; 188 unsigned Amt = ARM_AM::getAM3Offset(OffImm); 189 if (OffReg == 0) 190 // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand. 191 UpdateMI = BuildMI(MF, MI->getDebugLoc(), 192 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg) 193 .addReg(BaseReg).addImm(Amt) 194 .addImm(Pred).addReg(0).addReg(0); 195 else 196 UpdateMI = BuildMI(MF, MI->getDebugLoc(), 197 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg) 198 .addReg(BaseReg).addReg(OffReg) 199 .addImm(Pred).addReg(0).addReg(0); 200 break; 201 } 202 } 203 204 std::vector<MachineInstr*> NewMIs; 205 if (isPre) { 206 if (isLoad) 207 MemMI = BuildMI(MF, MI->getDebugLoc(), 208 get(MemOpc), MI->getOperand(0).getReg()) 209 .addReg(WBReg).addImm(0).addImm(Pred); 210 else 211 MemMI = BuildMI(MF, MI->getDebugLoc(), 212 get(MemOpc)).addReg(MI->getOperand(1).getReg()) 213 .addReg(WBReg).addReg(0).addImm(0).addImm(Pred); 214 NewMIs.push_back(MemMI); 215 NewMIs.push_back(UpdateMI); 216 } else { 217 if (isLoad) 218 MemMI = BuildMI(MF, MI->getDebugLoc(), 219 get(MemOpc), MI->getOperand(0).getReg()) 220 .addReg(BaseReg).addImm(0).addImm(Pred); 221 else 222 MemMI = BuildMI(MF, MI->getDebugLoc(), 223 get(MemOpc)).addReg(MI->getOperand(1).getReg()) 224 .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred); 225 if (WB.isDead()) 226 UpdateMI->getOperand(0).setIsDead(); 227 NewMIs.push_back(UpdateMI); 228 NewMIs.push_back(MemMI); 229 } 230 231 // Transfer LiveVariables states, kill / dead info. 
232 if (LV) { 233 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 234 MachineOperand &MO = MI->getOperand(i); 235 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) { 236 unsigned Reg = MO.getReg(); 237 238 LiveVariables::VarInfo &VI = LV->getVarInfo(Reg); 239 if (MO.isDef()) { 240 MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI; 241 if (MO.isDead()) 242 LV->addVirtualRegisterDead(Reg, NewMI); 243 } 244 if (MO.isUse() && MO.isKill()) { 245 for (unsigned j = 0; j < 2; ++j) { 246 // Look at the two new MI's in reverse order. 247 MachineInstr *NewMI = NewMIs[j]; 248 if (!NewMI->readsRegister(Reg)) 249 continue; 250 LV->addVirtualRegisterKilled(Reg, NewMI); 251 if (VI.removeKill(MI)) 252 VI.Kills.push_back(NewMI); 253 break; 254 } 255 } 256 } 257 } 258 } 259 260 MFI->insert(MBBI, NewMIs[1]); 261 MFI->insert(MBBI, NewMIs[0]); 262 return NewMIs[0]; 263} 264 265// Branch analysis. 266bool 267ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, 268 MachineBasicBlock *&FBB, 269 SmallVectorImpl<MachineOperand> &Cond, 270 bool AllowModify) const { 271 // If the block has no terminators, it just falls into the block after it. 272 MachineBasicBlock::iterator I = MBB.end(); 273 if (I == MBB.begin()) 274 return false; 275 --I; 276 while (I->isDebugValue()) { 277 if (I == MBB.begin()) 278 return false; 279 --I; 280 } 281 if (!isUnpredicatedTerminator(I)) 282 return false; 283 284 // Get the last instruction in the block. 285 MachineInstr *LastInst = I; 286 287 // If there is only one terminator instruction, process it. 288 unsigned LastOpc = LastInst->getOpcode(); 289 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { 290 if (isUncondBranchOpcode(LastOpc)) { 291 TBB = LastInst->getOperand(0).getMBB(); 292 return false; 293 } 294 if (isCondBranchOpcode(LastOpc)) { 295 // Block ends with fall-through condbranch. 296 TBB = LastInst->getOperand(0).getMBB(); 297 Cond.push_back(LastInst->getOperand(1)); 298 Cond.push_back(LastInst->getOperand(2)); 299 return false; 300 } 301 return true; // Can't handle indirect branch. 302 } 303 304 // Get the instruction before it if it is a terminator. 305 MachineInstr *SecondLastInst = I; 306 unsigned SecondLastOpc = SecondLastInst->getOpcode(); 307 308 // If AllowModify is true and the block ends with two or more unconditional 309 // branches, delete all but the first unconditional branch. 310 if (AllowModify && isUncondBranchOpcode(LastOpc)) { 311 while (isUncondBranchOpcode(SecondLastOpc)) { 312 LastInst->eraseFromParent(); 313 LastInst = SecondLastInst; 314 LastOpc = LastInst->getOpcode(); 315 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { 316 // Return now the only terminator is an unconditional branch. 317 TBB = LastInst->getOperand(0).getMBB(); 318 return false; 319 } else { 320 SecondLastInst = I; 321 SecondLastOpc = SecondLastInst->getOpcode(); 322 } 323 } 324 } 325 326 // If there are three terminators, we don't know what sort of block this is. 327 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I)) 328 return true; 329 330 // If the block ends with a B and a Bcc, handle it. 331 if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { 332 TBB = SecondLastInst->getOperand(0).getMBB(); 333 Cond.push_back(SecondLastInst->getOperand(1)); 334 Cond.push_back(SecondLastInst->getOperand(2)); 335 FBB = LastInst->getOperand(0).getMBB(); 336 return false; 337 } 338 339 // If the block ends with two unconditional branches, handle it. 
The second 340 // one is not executed, so remove it. 341 if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { 342 TBB = SecondLastInst->getOperand(0).getMBB(); 343 I = LastInst; 344 if (AllowModify) 345 I->eraseFromParent(); 346 return false; 347 } 348 349 // ...likewise if it ends with a branch table followed by an unconditional 350 // branch. The branch folder can create these, and we must get rid of them for 351 // correctness of Thumb constant islands. 352 if ((isJumpTableBranchOpcode(SecondLastOpc) || 353 isIndirectBranchOpcode(SecondLastOpc)) && 354 isUncondBranchOpcode(LastOpc)) { 355 I = LastInst; 356 if (AllowModify) 357 I->eraseFromParent(); 358 return true; 359 } 360 361 // Otherwise, can't handle this. 362 return true; 363} 364 365 366unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { 367 MachineBasicBlock::iterator I = MBB.end(); 368 if (I == MBB.begin()) return 0; 369 --I; 370 while (I->isDebugValue()) { 371 if (I == MBB.begin()) 372 return 0; 373 --I; 374 } 375 if (!isUncondBranchOpcode(I->getOpcode()) && 376 !isCondBranchOpcode(I->getOpcode())) 377 return 0; 378 379 // Remove the branch. 380 I->eraseFromParent(); 381 382 I = MBB.end(); 383 384 if (I == MBB.begin()) return 1; 385 --I; 386 if (!isCondBranchOpcode(I->getOpcode())) 387 return 1; 388 389 // Remove the branch. 390 I->eraseFromParent(); 391 return 2; 392} 393 394unsigned 395ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, 396 MachineBasicBlock *FBB, 397 const SmallVectorImpl<MachineOperand> &Cond, 398 DebugLoc DL) const { 399 ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>(); 400 int BOpc = !AFI->isThumbFunction() 401 ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB); 402 int BccOpc = !AFI->isThumbFunction() 403 ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc); 404 bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function(); 405 406 // Shouldn't be a fall through. 407 assert(TBB && "InsertBranch must not be told to insert a fallthrough"); 408 assert((Cond.size() == 2 || Cond.size() == 0) && 409 "ARM branch conditions have two components!"); 410 411 if (FBB == 0) { 412 if (Cond.empty()) { // Unconditional branch? 413 if (isThumb) 414 BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0); 415 else 416 BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB); 417 } else 418 BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB) 419 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()); 420 return 1; 421 } 422 423 // Two-way conditional branch. 
424 BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB) 425 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()); 426 if (isThumb) 427 BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0); 428 else 429 BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB); 430 return 2; 431} 432 433bool ARMBaseInstrInfo:: 434ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 435 ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm(); 436 Cond[0].setImm(ARMCC::getOppositeCondition(CC)); 437 return false; 438} 439 440bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const { 441 if (MI->isBundle()) { 442 MachineBasicBlock::const_instr_iterator I = MI; 443 MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end(); 444 while (++I != E && I->isInsideBundle()) { 445 int PIdx = I->findFirstPredOperandIdx(); 446 if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL) 447 return true; 448 } 449 return false; 450 } 451 452 int PIdx = MI->findFirstPredOperandIdx(); 453 return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL; 454} 455 456bool ARMBaseInstrInfo:: 457PredicateInstruction(MachineInstr *MI, 458 const SmallVectorImpl<MachineOperand> &Pred) const { 459 unsigned Opc = MI->getOpcode(); 460 if (isUncondBranchOpcode(Opc)) { 461 MI->setDesc(get(getMatchingCondBranchOpcode(Opc))); 462 MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm())); 463 MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false)); 464 return true; 465 } 466 467 int PIdx = MI->findFirstPredOperandIdx(); 468 if (PIdx != -1) { 469 MachineOperand &PMO = MI->getOperand(PIdx); 470 PMO.setImm(Pred[0].getImm()); 471 MI->getOperand(PIdx+1).setReg(Pred[1].getReg()); 472 return true; 473 } 474 return false; 475} 476 477bool ARMBaseInstrInfo:: 478SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1, 479 const SmallVectorImpl<MachineOperand> &Pred2) const { 480 if (Pred1.size() > 2 || Pred2.size() > 2) 481 return false; 482 483 ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm(); 484 ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm(); 485 if (CC1 == CC2) 486 return true; 487 488 switch (CC1) { 489 default: 490 return false; 491 case ARMCC::AL: 492 return true; 493 case ARMCC::HS: 494 return CC2 == ARMCC::HI; 495 case ARMCC::LS: 496 return CC2 == ARMCC::LO || CC2 == ARMCC::EQ; 497 case ARMCC::GE: 498 return CC2 == ARMCC::GT; 499 case ARMCC::LE: 500 return CC2 == ARMCC::LT; 501 } 502} 503 504bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI, 505 std::vector<MachineOperand> &Pred) const { 506 bool Found = false; 507 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 508 const MachineOperand &MO = MI->getOperand(i); 509 if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) || 510 (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) { 511 Pred.push_back(MO); 512 Found = true; 513 } 514 } 515 516 return Found; 517} 518 519/// isPredicable - Return true if the specified instruction can be predicated. 520/// By default, this returns true for every instruction with a 521/// PredicateOperand. 522bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const { 523 if (!MI->isPredicable()) 524 return false; 525 526 if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) { 527 ARMFunctionInfo *AFI = 528 MI->getParent()->getParent()->getInfo<ARMFunctionInfo>(); 529 return AFI->isThumb2Function(); 530 } 531 return true; 532} 533 534/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing. 
535LLVM_ATTRIBUTE_NOINLINE 536static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT, 537 unsigned JTI); 538static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT, 539 unsigned JTI) { 540 assert(JTI < JT.size()); 541 return JT[JTI].MBBs.size(); 542} 543 544/// GetInstSize - Return the size of the specified MachineInstr. 545/// 546unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { 547 const MachineBasicBlock &MBB = *MI->getParent(); 548 const MachineFunction *MF = MBB.getParent(); 549 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo(); 550 551 const MCInstrDesc &MCID = MI->getDesc(); 552 if (MCID.getSize()) 553 return MCID.getSize(); 554 555 // If this machine instr is an inline asm, measure it. 556 if (MI->getOpcode() == ARM::INLINEASM) 557 return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI); 558 if (MI->isLabel()) 559 return 0; 560 unsigned Opc = MI->getOpcode(); 561 switch (Opc) { 562 case TargetOpcode::IMPLICIT_DEF: 563 case TargetOpcode::KILL: 564 case TargetOpcode::PROLOG_LABEL: 565 case TargetOpcode::EH_LABEL: 566 case TargetOpcode::DBG_VALUE: 567 return 0; 568 case TargetOpcode::BUNDLE: 569 return getInstBundleLength(MI); 570 case ARM::MOVi16_ga_pcrel: 571 case ARM::MOVTi16_ga_pcrel: 572 case ARM::t2MOVi16_ga_pcrel: 573 case ARM::t2MOVTi16_ga_pcrel: 574 return 4; 575 case ARM::MOVi32imm: 576 case ARM::t2MOVi32imm: 577 return 8; 578 case ARM::CONSTPOOL_ENTRY: 579 // If this machine instr is a constant pool entry, its size is recorded as 580 // operand #2. 581 return MI->getOperand(2).getImm(); 582 case ARM::Int_eh_sjlj_longjmp: 583 return 16; 584 case ARM::tInt_eh_sjlj_longjmp: 585 return 10; 586 case ARM::Int_eh_sjlj_setjmp: 587 case ARM::Int_eh_sjlj_setjmp_nofp: 588 return 20; 589 case ARM::tInt_eh_sjlj_setjmp: 590 case ARM::t2Int_eh_sjlj_setjmp: 591 case ARM::t2Int_eh_sjlj_setjmp_nofp: 592 return 12; 593 case ARM::BR_JTr: 594 case ARM::BR_JTm: 595 case ARM::BR_JTadd: 596 case ARM::tBR_JTr: 597 case ARM::t2BR_JT: 598 case ARM::t2TBB_JT: 599 case ARM::t2TBH_JT: { 600 // These are jumptable branches, i.e. a branch followed by an inlined 601 // jumptable. The size is 4 + 4 * number of entries. For TBB, each 602 // entry is one byte; TBH two byte each. 603 unsigned EntrySize = (Opc == ARM::t2TBB_JT) 604 ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4); 605 unsigned NumOps = MCID.getNumOperands(); 606 MachineOperand JTOP = 607 MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2)); 608 unsigned JTI = JTOP.getIndex(); 609 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo(); 610 assert(MJTI != 0); 611 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); 612 assert(JTI < JT.size()); 613 // Thumb instructions are 2 byte aligned, but JT entries are 4 byte 614 // 4 aligned. The assembler / linker may add 2 byte padding just before 615 // the JT entries. The size does not include this padding; the 616 // constant islands pass does separate bookkeeping for it. 617 // FIXME: If we know the size of the function is less than (1 << 16) *2 618 // bytes, we can use 16-bit entries instead. Then there won't be an 619 // alignment issue. 620 unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4; 621 unsigned NumEntries = getNumJTEntries(JT, JTI); 622 if (Opc == ARM::t2TBB_JT && (NumEntries & 1)) 623 // Make sure the instruction that follows TBB is 2-byte aligned. 624 // FIXME: Constant island pass should insert an "ALIGN" instruction 625 // instead. 
626 ++NumEntries; 627 return NumEntries * EntrySize + InstSize; 628 } 629 default: 630 // Otherwise, pseudo-instruction sizes are zero. 631 return 0; 632 } 633} 634 635unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const { 636 unsigned Size = 0; 637 MachineBasicBlock::const_instr_iterator I = MI; 638 MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end(); 639 while (++I != E && I->isInsideBundle()) { 640 assert(!I->isBundle() && "No nested bundle!"); 641 Size += GetInstSizeInBytes(&*I); 642 } 643 return Size; 644} 645 646void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, 647 MachineBasicBlock::iterator I, DebugLoc DL, 648 unsigned DestReg, unsigned SrcReg, 649 bool KillSrc) const { 650 bool GPRDest = ARM::GPRRegClass.contains(DestReg); 651 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg); 652 653 if (GPRDest && GPRSrc) { 654 AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg) 655 .addReg(SrcReg, getKillRegState(KillSrc)))); 656 return; 657 } 658 659 bool SPRDest = ARM::SPRRegClass.contains(DestReg); 660 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg); 661 662 unsigned Opc = 0; 663 if (SPRDest && SPRSrc) 664 Opc = ARM::VMOVS; 665 else if (GPRDest && SPRSrc) 666 Opc = ARM::VMOVRS; 667 else if (SPRDest && GPRSrc) 668 Opc = ARM::VMOVSR; 669 else if (ARM::DPRRegClass.contains(DestReg, SrcReg)) 670 Opc = ARM::VMOVD; 671 else if (ARM::QPRRegClass.contains(DestReg, SrcReg)) 672 Opc = ARM::VORRq; 673 674 if (Opc) { 675 MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg); 676 MIB.addReg(SrcReg, getKillRegState(KillSrc)); 677 if (Opc == ARM::VORRq) 678 MIB.addReg(SrcReg, getKillRegState(KillSrc)); 679 AddDefaultPred(MIB); 680 return; 681 } 682 683 // Handle register classes that require multiple instructions. 684 unsigned BeginIdx = 0; 685 unsigned SubRegs = 0; 686 unsigned Spacing = 1; 687 688 // Use VORRq when possible. 689 if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) 690 Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 2; 691 else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) 692 Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 4; 693 // Fall back to VMOVD. 694 else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) 695 Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2; 696 else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) 697 Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3; 698 else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) 699 Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4; 700 701 else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) 702 Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2, Spacing = 2; 703 else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) 704 Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3, Spacing = 2; 705 else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) 706 Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4, Spacing = 2; 707 708 if (Opc) { 709 const TargetRegisterInfo *TRI = &getRegisterInfo(); 710 MachineInstrBuilder Mov; 711 for (unsigned i = 0; i != SubRegs; ++i) { 712 unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i*Spacing); 713 unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i*Spacing); 714 assert(Dst && Src && "Bad sub-register"); 715 Mov = AddDefaultPred(BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst) 716 .addReg(Src)); 717 // VORR takes two source operands. 718 if (Opc == ARM::VORRq) 719 Mov.addReg(Src); 720 } 721 // Add implicit super-register defs and kills to the last instruction. 
722 Mov->addRegisterDefined(DestReg, TRI); 723 if (KillSrc) 724 Mov->addRegisterKilled(SrcReg, TRI); 725 return; 726 } 727 728 llvm_unreachable("Impossible reg-to-reg copy"); 729} 730 731static const 732MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB, 733 unsigned Reg, unsigned SubIdx, unsigned State, 734 const TargetRegisterInfo *TRI) { 735 if (!SubIdx) 736 return MIB.addReg(Reg, State); 737 738 if (TargetRegisterInfo::isPhysicalRegister(Reg)) 739 return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State); 740 return MIB.addReg(Reg, State, SubIdx); 741} 742 743void ARMBaseInstrInfo:: 744storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 745 unsigned SrcReg, bool isKill, int FI, 746 const TargetRegisterClass *RC, 747 const TargetRegisterInfo *TRI) const { 748 DebugLoc DL; 749 if (I != MBB.end()) DL = I->getDebugLoc(); 750 MachineFunction &MF = *MBB.getParent(); 751 MachineFrameInfo &MFI = *MF.getFrameInfo(); 752 unsigned Align = MFI.getObjectAlignment(FI); 753 754 MachineMemOperand *MMO = 755 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 756 MachineMemOperand::MOStore, 757 MFI.getObjectSize(FI), 758 Align); 759 760 switch (RC->getSize()) { 761 case 4: 762 if (ARM::GPRRegClass.hasSubClassEq(RC)) { 763 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12)) 764 .addReg(SrcReg, getKillRegState(isKill)) 765 .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); 766 } else if (ARM::SPRRegClass.hasSubClassEq(RC)) { 767 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS)) 768 .addReg(SrcReg, getKillRegState(isKill)) 769 .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); 770 } else 771 llvm_unreachable("Unknown reg class!"); 772 break; 773 case 8: 774 if (ARM::DPRRegClass.hasSubClassEq(RC)) { 775 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD)) 776 .addReg(SrcReg, getKillRegState(isKill)) 777 .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); 778 } else 779 llvm_unreachable("Unknown reg class!"); 780 break; 781 case 16: 782 if (ARM::DPairRegClass.hasSubClassEq(RC)) { 783 // Use aligned spills if the stack can be realigned. 784 if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { 785 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64)) 786 .addFrameIndex(FI).addImm(16) 787 .addReg(SrcReg, getKillRegState(isKill)) 788 .addMemOperand(MMO)); 789 } else { 790 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA)) 791 .addReg(SrcReg, getKillRegState(isKill)) 792 .addFrameIndex(FI) 793 .addMemOperand(MMO)); 794 } 795 } else 796 llvm_unreachable("Unknown reg class!"); 797 break; 798 case 32: 799 if (ARM::QQPRRegClass.hasSubClassEq(RC)) { 800 if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { 801 // FIXME: It's possible to only store part of the QQ register if the 802 // spilled def has a sub-register index. 
803 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo)) 804 .addFrameIndex(FI).addImm(16) 805 .addReg(SrcReg, getKillRegState(isKill)) 806 .addMemOperand(MMO)); 807 } else { 808 MachineInstrBuilder MIB = 809 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) 810 .addFrameIndex(FI)) 811 .addMemOperand(MMO); 812 MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI); 813 MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI); 814 MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI); 815 AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI); 816 } 817 } else 818 llvm_unreachable("Unknown reg class!"); 819 break; 820 case 64: 821 if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) { 822 MachineInstrBuilder MIB = 823 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) 824 .addFrameIndex(FI)) 825 .addMemOperand(MMO); 826 MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI); 827 MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI); 828 MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI); 829 MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI); 830 MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI); 831 MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI); 832 MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI); 833 AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI); 834 } else 835 llvm_unreachable("Unknown reg class!"); 836 break; 837 default: 838 llvm_unreachable("Unknown reg class!"); 839 } 840} 841 842unsigned 843ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI, 844 int &FrameIndex) const { 845 switch (MI->getOpcode()) { 846 default: break; 847 case ARM::STRrs: 848 case ARM::t2STRs: // FIXME: don't use t2STRs to access frame. 849 if (MI->getOperand(1).isFI() && 850 MI->getOperand(2).isReg() && 851 MI->getOperand(3).isImm() && 852 MI->getOperand(2).getReg() == 0 && 853 MI->getOperand(3).getImm() == 0) { 854 FrameIndex = MI->getOperand(1).getIndex(); 855 return MI->getOperand(0).getReg(); 856 } 857 break; 858 case ARM::STRi12: 859 case ARM::t2STRi12: 860 case ARM::tSTRspi: 861 case ARM::VSTRD: 862 case ARM::VSTRS: 863 if (MI->getOperand(1).isFI() && 864 MI->getOperand(2).isImm() && 865 MI->getOperand(2).getImm() == 0) { 866 FrameIndex = MI->getOperand(1).getIndex(); 867 return MI->getOperand(0).getReg(); 868 } 869 break; 870 case ARM::VST1q64: 871 if (MI->getOperand(0).isFI() && 872 MI->getOperand(2).getSubReg() == 0) { 873 FrameIndex = MI->getOperand(0).getIndex(); 874 return MI->getOperand(2).getReg(); 875 } 876 break; 877 case ARM::VSTMQIA: 878 if (MI->getOperand(1).isFI() && 879 MI->getOperand(0).getSubReg() == 0) { 880 FrameIndex = MI->getOperand(1).getIndex(); 881 return MI->getOperand(0).getReg(); 882 } 883 break; 884 } 885 886 return 0; 887} 888 889unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI, 890 int &FrameIndex) const { 891 const MachineMemOperand *Dummy; 892 return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex); 893} 894 895void ARMBaseInstrInfo:: 896loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 897 unsigned DestReg, int FI, 898 const TargetRegisterClass *RC, 899 const TargetRegisterInfo *TRI) const { 900 DebugLoc DL; 901 if (I != MBB.end()) DL = I->getDebugLoc(); 902 MachineFunction &MF = *MBB.getParent(); 903 MachineFrameInfo &MFI = *MF.getFrameInfo(); 904 unsigned Align = MFI.getObjectAlignment(FI); 905 MachineMemOperand *MMO = 906 MF.getMachineMemOperand( 907 MachinePointerInfo::getFixedStack(FI), 908 MachineMemOperand::MOLoad, 909 MFI.getObjectSize(FI), 910 Align); 911 912 switch (RC->getSize()) { 913 case 4: 914 if 
(ARM::GPRRegClass.hasSubClassEq(RC)) { 915 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg) 916 .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); 917 918 } else if (ARM::SPRRegClass.hasSubClassEq(RC)) { 919 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg) 920 .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); 921 } else 922 llvm_unreachable("Unknown reg class!"); 923 break; 924 case 8: 925 if (ARM::DPRRegClass.hasSubClassEq(RC)) { 926 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg) 927 .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); 928 } else 929 llvm_unreachable("Unknown reg class!"); 930 break; 931 case 16: 932 if (ARM::DPairRegClass.hasSubClassEq(RC)) { 933 if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { 934 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg) 935 .addFrameIndex(FI).addImm(16) 936 .addMemOperand(MMO)); 937 } else { 938 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg) 939 .addFrameIndex(FI) 940 .addMemOperand(MMO)); 941 } 942 } else 943 llvm_unreachable("Unknown reg class!"); 944 break; 945 case 32: 946 if (ARM::QQPRRegClass.hasSubClassEq(RC)) { 947 if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { 948 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg) 949 .addFrameIndex(FI).addImm(16) 950 .addMemOperand(MMO)); 951 } else { 952 MachineInstrBuilder MIB = 953 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) 954 .addFrameIndex(FI)) 955 .addMemOperand(MMO); 956 MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI); 957 MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI); 958 MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI); 959 MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI); 960 if (TargetRegisterInfo::isPhysicalRegister(DestReg)) 961 MIB.addReg(DestReg, RegState::ImplicitDefine); 962 } 963 } else 964 llvm_unreachable("Unknown reg class!"); 965 break; 966 case 64: 967 if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) { 968 MachineInstrBuilder MIB = 969 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) 970 .addFrameIndex(FI)) 971 .addMemOperand(MMO); 972 MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI); 973 MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI); 974 MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI); 975 MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI); 976 MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI); 977 MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI); 978 MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI); 979 MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI); 980 if (TargetRegisterInfo::isPhysicalRegister(DestReg)) 981 MIB.addReg(DestReg, RegState::ImplicitDefine); 982 } else 983 llvm_unreachable("Unknown reg class!"); 984 break; 985 default: 986 llvm_unreachable("Unknown regclass!"); 987 } 988} 989 990unsigned 991ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, 992 int &FrameIndex) const { 993 switch (MI->getOpcode()) { 994 default: break; 995 case ARM::LDRrs: 996 case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame. 
997 if (MI->getOperand(1).isFI() && 998 MI->getOperand(2).isReg() && 999 MI->getOperand(3).isImm() && 1000 MI->getOperand(2).getReg() == 0 && 1001 MI->getOperand(3).getImm() == 0) { 1002 FrameIndex = MI->getOperand(1).getIndex(); 1003 return MI->getOperand(0).getReg(); 1004 } 1005 break; 1006 case ARM::LDRi12: 1007 case ARM::t2LDRi12: 1008 case ARM::tLDRspi: 1009 case ARM::VLDRD: 1010 case ARM::VLDRS: 1011 if (MI->getOperand(1).isFI() && 1012 MI->getOperand(2).isImm() && 1013 MI->getOperand(2).getImm() == 0) { 1014 FrameIndex = MI->getOperand(1).getIndex(); 1015 return MI->getOperand(0).getReg(); 1016 } 1017 break; 1018 case ARM::VLD1q64: 1019 if (MI->getOperand(1).isFI() && 1020 MI->getOperand(0).getSubReg() == 0) { 1021 FrameIndex = MI->getOperand(1).getIndex(); 1022 return MI->getOperand(0).getReg(); 1023 } 1024 break; 1025 case ARM::VLDMQIA: 1026 if (MI->getOperand(1).isFI() && 1027 MI->getOperand(0).getSubReg() == 0) { 1028 FrameIndex = MI->getOperand(1).getIndex(); 1029 return MI->getOperand(0).getReg(); 1030 } 1031 break; 1032 } 1033 1034 return 0; 1035} 1036 1037unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI, 1038 int &FrameIndex) const { 1039 const MachineMemOperand *Dummy; 1040 return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex); 1041} 1042 1043bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{ 1044 // This hook gets to expand COPY instructions before they become 1045 // copyPhysReg() calls. Look for VMOVS instructions that can legally be 1046 // widened to VMOVD. We prefer the VMOVD when possible because it may be 1047 // changed into a VORR that can go down the NEON pipeline. 1048 if (!WidenVMOVS || !MI->isCopy()) 1049 return false; 1050 1051 // Look for a copy between even S-registers. That is where we keep floats 1052 // when using NEON v2f32 instructions for f32 arithmetic. 1053 unsigned DstRegS = MI->getOperand(0).getReg(); 1054 unsigned SrcRegS = MI->getOperand(1).getReg(); 1055 if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS)) 1056 return false; 1057 1058 const TargetRegisterInfo *TRI = &getRegisterInfo(); 1059 unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, 1060 &ARM::DPRRegClass); 1061 unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, 1062 &ARM::DPRRegClass); 1063 if (!DstRegD || !SrcRegD) 1064 return false; 1065 1066 // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only 1067 // legal if the COPY already defines the full DstRegD, and it isn't a 1068 // sub-register insertion. 1069 if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI)) 1070 return false; 1071 1072 // A dead copy shouldn't show up here, but reject it just in case. 1073 if (MI->getOperand(0).isDead()) 1074 return false; 1075 1076 // All clear, widen the COPY. 1077 DEBUG(dbgs() << "widening: " << *MI); 1078 1079 // Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg 1080 // or some other super-register. 1081 int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD); 1082 if (ImpDefIdx != -1) 1083 MI->RemoveOperand(ImpDefIdx); 1084 1085 // Change the opcode and operands. 1086 MI->setDesc(get(ARM::VMOVD)); 1087 MI->getOperand(0).setReg(DstRegD); 1088 MI->getOperand(1).setReg(SrcRegD); 1089 AddDefaultPred(MachineInstrBuilder(MI)); 1090 1091 // We are now reading SrcRegD instead of SrcRegS. 
This may upset the 1092 // register scavenger and machine verifier, so we need to indicate that we 1093 // are reading an undefined value from SrcRegD, but a proper value from 1094 // SrcRegS. 1095 MI->getOperand(1).setIsUndef(); 1096 MachineInstrBuilder(MI).addReg(SrcRegS, RegState::Implicit); 1097 1098 // SrcRegD may actually contain an unrelated value in the ssub_1 1099 // sub-register. Don't kill it. Only kill the ssub_0 sub-register. 1100 if (MI->getOperand(1).isKill()) { 1101 MI->getOperand(1).setIsKill(false); 1102 MI->addRegisterKilled(SrcRegS, TRI, true); 1103 } 1104 1105 DEBUG(dbgs() << "replaced by: " << *MI); 1106 return true; 1107} 1108 1109MachineInstr* 1110ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, 1111 int FrameIx, uint64_t Offset, 1112 const MDNode *MDPtr, 1113 DebugLoc DL) const { 1114 MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE)) 1115 .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr); 1116 return &*MIB; 1117} 1118 1119/// Create a copy of a const pool value. Update CPI to the new index and return 1120/// the label UID. 1121static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) { 1122 MachineConstantPool *MCP = MF.getConstantPool(); 1123 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1124 1125 const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI]; 1126 assert(MCPE.isMachineConstantPoolEntry() && 1127 "Expecting a machine constantpool entry!"); 1128 ARMConstantPoolValue *ACPV = 1129 static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal); 1130 1131 unsigned PCLabelId = AFI->createPICLabelUId(); 1132 ARMConstantPoolValue *NewCPV = 0; 1133 // FIXME: The below assumes PIC relocation model and that the function 1134 // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and 1135 // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR 1136 // instructions, so that's probably OK, but is PIC always correct when 1137 // we get here? 
1138 if (ACPV->isGlobalValue()) 1139 NewCPV = ARMConstantPoolConstant:: 1140 Create(cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId, 1141 ARMCP::CPValue, 4); 1142 else if (ACPV->isExtSymbol()) 1143 NewCPV = ARMConstantPoolSymbol:: 1144 Create(MF.getFunction()->getContext(), 1145 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4); 1146 else if (ACPV->isBlockAddress()) 1147 NewCPV = ARMConstantPoolConstant:: 1148 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId, 1149 ARMCP::CPBlockAddress, 4); 1150 else if (ACPV->isLSDA()) 1151 NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId, 1152 ARMCP::CPLSDA, 4); 1153 else if (ACPV->isMachineBasicBlock()) 1154 NewCPV = ARMConstantPoolMBB:: 1155 Create(MF.getFunction()->getContext(), 1156 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4); 1157 else 1158 llvm_unreachable("Unexpected ARM constantpool value type!!"); 1159 CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment()); 1160 return PCLabelId; 1161} 1162 1163void ARMBaseInstrInfo:: 1164reMaterialize(MachineBasicBlock &MBB, 1165 MachineBasicBlock::iterator I, 1166 unsigned DestReg, unsigned SubIdx, 1167 const MachineInstr *Orig, 1168 const TargetRegisterInfo &TRI) const { 1169 unsigned Opcode = Orig->getOpcode(); 1170 switch (Opcode) { 1171 default: { 1172 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig); 1173 MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI); 1174 MBB.insert(I, MI); 1175 break; 1176 } 1177 case ARM::tLDRpci_pic: 1178 case ARM::t2LDRpci_pic: { 1179 MachineFunction &MF = *MBB.getParent(); 1180 unsigned CPI = Orig->getOperand(1).getIndex(); 1181 unsigned PCLabelId = duplicateCPV(MF, CPI); 1182 MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode), 1183 DestReg) 1184 .addConstantPoolIndex(CPI).addImm(PCLabelId); 1185 MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end()); 1186 break; 1187 } 1188 } 1189} 1190 1191MachineInstr * 1192ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const { 1193 MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF); 1194 switch(Orig->getOpcode()) { 1195 case ARM::tLDRpci_pic: 1196 case ARM::t2LDRpci_pic: { 1197 unsigned CPI = Orig->getOperand(1).getIndex(); 1198 unsigned PCLabelId = duplicateCPV(MF, CPI); 1199 Orig->getOperand(1).setIndex(CPI); 1200 Orig->getOperand(2).setImm(PCLabelId); 1201 break; 1202 } 1203 } 1204 return MI; 1205} 1206 1207bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0, 1208 const MachineInstr *MI1, 1209 const MachineRegisterInfo *MRI) const { 1210 int Opcode = MI0->getOpcode(); 1211 if (Opcode == ARM::t2LDRpci || 1212 Opcode == ARM::t2LDRpci_pic || 1213 Opcode == ARM::tLDRpci || 1214 Opcode == ARM::tLDRpci_pic || 1215 Opcode == ARM::MOV_ga_dyn || 1216 Opcode == ARM::MOV_ga_pcrel || 1217 Opcode == ARM::MOV_ga_pcrel_ldr || 1218 Opcode == ARM::t2MOV_ga_dyn || 1219 Opcode == ARM::t2MOV_ga_pcrel) { 1220 if (MI1->getOpcode() != Opcode) 1221 return false; 1222 if (MI0->getNumOperands() != MI1->getNumOperands()) 1223 return false; 1224 1225 const MachineOperand &MO0 = MI0->getOperand(1); 1226 const MachineOperand &MO1 = MI1->getOperand(1); 1227 if (MO0.getOffset() != MO1.getOffset()) 1228 return false; 1229 1230 if (Opcode == ARM::MOV_ga_dyn || 1231 Opcode == ARM::MOV_ga_pcrel || 1232 Opcode == ARM::MOV_ga_pcrel_ldr || 1233 Opcode == ARM::t2MOV_ga_dyn || 1234 Opcode == ARM::t2MOV_ga_pcrel) 1235 // Ignore the PC labels. 
1236 return MO0.getGlobal() == MO1.getGlobal(); 1237 1238 const MachineFunction *MF = MI0->getParent()->getParent(); 1239 const MachineConstantPool *MCP = MF->getConstantPool(); 1240 int CPI0 = MO0.getIndex(); 1241 int CPI1 = MO1.getIndex(); 1242 const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0]; 1243 const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1]; 1244 bool isARMCP0 = MCPE0.isMachineConstantPoolEntry(); 1245 bool isARMCP1 = MCPE1.isMachineConstantPoolEntry(); 1246 if (isARMCP0 && isARMCP1) { 1247 ARMConstantPoolValue *ACPV0 = 1248 static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal); 1249 ARMConstantPoolValue *ACPV1 = 1250 static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal); 1251 return ACPV0->hasSameValue(ACPV1); 1252 } else if (!isARMCP0 && !isARMCP1) { 1253 return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal; 1254 } 1255 return false; 1256 } else if (Opcode == ARM::PICLDR) { 1257 if (MI1->getOpcode() != Opcode) 1258 return false; 1259 if (MI0->getNumOperands() != MI1->getNumOperands()) 1260 return false; 1261 1262 unsigned Addr0 = MI0->getOperand(1).getReg(); 1263 unsigned Addr1 = MI1->getOperand(1).getReg(); 1264 if (Addr0 != Addr1) { 1265 if (!MRI || 1266 !TargetRegisterInfo::isVirtualRegister(Addr0) || 1267 !TargetRegisterInfo::isVirtualRegister(Addr1)) 1268 return false; 1269 1270 // This assumes SSA form. 1271 MachineInstr *Def0 = MRI->getVRegDef(Addr0); 1272 MachineInstr *Def1 = MRI->getVRegDef(Addr1); 1273 // Check if the loaded value, e.g. a constantpool of a global address, are 1274 // the same. 1275 if (!produceSameValue(Def0, Def1, MRI)) 1276 return false; 1277 } 1278 1279 for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) { 1280 // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg 1281 const MachineOperand &MO0 = MI0->getOperand(i); 1282 const MachineOperand &MO1 = MI1->getOperand(i); 1283 if (!MO0.isIdenticalTo(MO1)) 1284 return false; 1285 } 1286 return true; 1287 } 1288 1289 return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs); 1290} 1291 1292/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to 1293/// determine if two loads are loading from the same base address. It should 1294/// only return true if the base pointers are the same and the only differences 1295/// between the two addresses is the offset. It also returns the offsets by 1296/// reference. 1297bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, 1298 int64_t &Offset1, 1299 int64_t &Offset2) const { 1300 // Don't worry about Thumb: just ARM and Thumb2. 
1301 if (Subtarget.isThumb1Only()) return false; 1302 1303 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) 1304 return false; 1305 1306 switch (Load1->getMachineOpcode()) { 1307 default: 1308 return false; 1309 case ARM::LDRi12: 1310 case ARM::LDRBi12: 1311 case ARM::LDRD: 1312 case ARM::LDRH: 1313 case ARM::LDRSB: 1314 case ARM::LDRSH: 1315 case ARM::VLDRD: 1316 case ARM::VLDRS: 1317 case ARM::t2LDRi8: 1318 case ARM::t2LDRDi8: 1319 case ARM::t2LDRSHi8: 1320 case ARM::t2LDRi12: 1321 case ARM::t2LDRSHi12: 1322 break; 1323 } 1324 1325 switch (Load2->getMachineOpcode()) { 1326 default: 1327 return false; 1328 case ARM::LDRi12: 1329 case ARM::LDRBi12: 1330 case ARM::LDRD: 1331 case ARM::LDRH: 1332 case ARM::LDRSB: 1333 case ARM::LDRSH: 1334 case ARM::VLDRD: 1335 case ARM::VLDRS: 1336 case ARM::t2LDRi8: 1337 case ARM::t2LDRDi8: 1338 case ARM::t2LDRSHi8: 1339 case ARM::t2LDRi12: 1340 case ARM::t2LDRSHi12: 1341 break; 1342 } 1343 1344 // Check if base addresses and chain operands match. 1345 if (Load1->getOperand(0) != Load2->getOperand(0) || 1346 Load1->getOperand(4) != Load2->getOperand(4)) 1347 return false; 1348 1349 // Index should be Reg0. 1350 if (Load1->getOperand(3) != Load2->getOperand(3)) 1351 return false; 1352 1353 // Determine the offsets. 1354 if (isa<ConstantSDNode>(Load1->getOperand(1)) && 1355 isa<ConstantSDNode>(Load2->getOperand(1))) { 1356 Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue(); 1357 Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue(); 1358 return true; 1359 } 1360 1361 return false; 1362} 1363 1364/// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to 1365/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should 1366/// be scheduled togther. On some targets if two loads are loading from 1367/// addresses in the same cache line, it's better if they are scheduled 1368/// together. This function takes two integers that represent the load offsets 1369/// from the common base address. It returns true if it decides it's desirable 1370/// to schedule the two loads together. "NumLoads" is the number of loads that 1371/// have already been scheduled after Load1. 1372bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 1373 int64_t Offset1, int64_t Offset2, 1374 unsigned NumLoads) const { 1375 // Don't worry about Thumb: just ARM and Thumb2. 1376 if (Subtarget.isThumb1Only()) return false; 1377 1378 assert(Offset2 > Offset1); 1379 1380 if ((Offset2 - Offset1) / 8 > 64) 1381 return false; 1382 1383 if (Load1->getMachineOpcode() != Load2->getMachineOpcode()) 1384 return false; // FIXME: overly conservative? 1385 1386 // Four loads in a row should be sufficient. 1387 if (NumLoads >= 3) 1388 return false; 1389 1390 return true; 1391} 1392 1393bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI, 1394 const MachineBasicBlock *MBB, 1395 const MachineFunction &MF) const { 1396 // Debug info is never a scheduling boundary. It's necessary to be explicit 1397 // due to the special treatment of IT instructions below, otherwise a 1398 // dbg_value followed by an IT will result in the IT instruction being 1399 // considered a scheduling hazard, which is wrong. It should be the actual 1400 // instruction preceding the dbg_value instruction(s), just like it is 1401 // when debug info is not present. 1402 if (MI->isDebugValue()) 1403 return false; 1404 1405 // Terminators and labels can't be scheduled around. 
1406 if (MI->isTerminator() || MI->isLabel()) 1407 return true; 1408 1409 // Treat the start of the IT block as a scheduling boundary, but schedule 1410 // t2IT along with all instructions following it. 1411 // FIXME: This is a big hammer. But the alternative is to add all potential 1412 // true and anti dependencies to IT block instructions as implicit operands 1413 // to the t2IT instruction. The added compile time and complexity does not 1414 // seem worth it. 1415 MachineBasicBlock::const_iterator I = MI; 1416 // Make sure to skip any dbg_value instructions 1417 while (++I != MBB->end() && I->isDebugValue()) 1418 ; 1419 if (I != MBB->end() && I->getOpcode() == ARM::t2IT) 1420 return true; 1421 1422 // Don't attempt to schedule around any instruction that defines 1423 // a stack-oriented pointer, as it's unlikely to be profitable. This 1424 // saves compile time, because it doesn't require every single 1425 // stack slot reference to depend on the instruction that does the 1426 // modification. 1427 // Calls don't actually change the stack pointer, even if they have imp-defs. 1428 // No ARM calling conventions change the stack pointer. (X86 calling 1429 // conventions sometimes do). 1430 if (!MI->isCall() && MI->definesRegister(ARM::SP)) 1431 return true; 1432 1433 return false; 1434} 1435 1436bool ARMBaseInstrInfo:: 1437isProfitableToIfCvt(MachineBasicBlock &MBB, 1438 unsigned NumCycles, unsigned ExtraPredCycles, 1439 const BranchProbability &Probability) const { 1440 if (!NumCycles) 1441 return false; 1442 1443 // Attempt to estimate the relative costs of predication versus branching. 1444 unsigned UnpredCost = Probability.getNumerator() * NumCycles; 1445 UnpredCost /= Probability.getDenominator(); 1446 UnpredCost += 1; // The branch itself 1447 UnpredCost += Subtarget.getMispredictionPenalty() / 10; 1448 1449 return (NumCycles + ExtraPredCycles) <= UnpredCost; 1450} 1451 1452bool ARMBaseInstrInfo:: 1453isProfitableToIfCvt(MachineBasicBlock &TMBB, 1454 unsigned TCycles, unsigned TExtra, 1455 MachineBasicBlock &FMBB, 1456 unsigned FCycles, unsigned FExtra, 1457 const BranchProbability &Probability) const { 1458 if (!TCycles || !FCycles) 1459 return false; 1460 1461 // Attempt to estimate the relative costs of predication versus branching. 1462 unsigned TUnpredCost = Probability.getNumerator() * TCycles; 1463 TUnpredCost /= Probability.getDenominator(); 1464 1465 uint32_t Comp = Probability.getDenominator() - Probability.getNumerator(); 1466 unsigned FUnpredCost = Comp * FCycles; 1467 FUnpredCost /= Probability.getDenominator(); 1468 1469 unsigned UnpredCost = TUnpredCost + FUnpredCost; 1470 UnpredCost += 1; // The branch itself 1471 UnpredCost += Subtarget.getMispredictionPenalty() / 10; 1472 1473 return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost; 1474} 1475 1476/// getInstrPredicate - If instruction is predicated, returns its predicate 1477/// condition, otherwise returns AL. It also returns the condition code 1478/// register by reference. 
1479ARMCC::CondCodes 1480llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) { 1481 int PIdx = MI->findFirstPredOperandIdx(); 1482 if (PIdx == -1) { 1483 PredReg = 0; 1484 return ARMCC::AL; 1485 } 1486 1487 PredReg = MI->getOperand(PIdx+1).getReg(); 1488 return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm(); 1489} 1490 1491 1492int llvm::getMatchingCondBranchOpcode(int Opc) { 1493 if (Opc == ARM::B) 1494 return ARM::Bcc; 1495 if (Opc == ARM::tB) 1496 return ARM::tBcc; 1497 if (Opc == ARM::t2B) 1498 return ARM::t2Bcc; 1499 1500 llvm_unreachable("Unknown unconditional branch opcode!"); 1501} 1502 1503/// commuteInstruction - Handle commutable instructions. 1504MachineInstr * 1505ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { 1506 switch (MI->getOpcode()) { 1507 case ARM::MOVCCr: 1508 case ARM::t2MOVCCr: { 1509 // MOVCC can be commuted by inverting the condition. 1510 unsigned PredReg = 0; 1511 ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg); 1512 // MOVCC AL can't be inverted. Shouldn't happen. 1513 if (CC == ARMCC::AL || PredReg != ARM::CPSR) 1514 return NULL; 1515 MI = TargetInstrInfoImpl::commuteInstruction(MI, NewMI); 1516 if (!MI) 1517 return NULL; 1518 // After swapping the MOVCC operands, also invert the condition. 1519 MI->getOperand(MI->findFirstPredOperandIdx()) 1520 .setImm(ARMCC::getOppositeCondition(CC)); 1521 return MI; 1522 } 1523 } 1524 return TargetInstrInfoImpl::commuteInstruction(MI, NewMI); 1525} 1526 1527/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the 1528/// instruction is encoded with an 'S' bit is determined by the optional CPSR 1529/// def operand. 1530/// 1531/// This will go away once we can teach tblgen how to set the optional CPSR def 1532/// operand itself. 1533struct AddSubFlagsOpcodePair { 1534 uint16_t PseudoOpc; 1535 uint16_t MachineOpc; 1536}; 1537 1538static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = { 1539 {ARM::ADDSri, ARM::ADDri}, 1540 {ARM::ADDSrr, ARM::ADDrr}, 1541 {ARM::ADDSrsi, ARM::ADDrsi}, 1542 {ARM::ADDSrsr, ARM::ADDrsr}, 1543 1544 {ARM::SUBSri, ARM::SUBri}, 1545 {ARM::SUBSrr, ARM::SUBrr}, 1546 {ARM::SUBSrsi, ARM::SUBrsi}, 1547 {ARM::SUBSrsr, ARM::SUBrsr}, 1548 1549 {ARM::RSBSri, ARM::RSBri}, 1550 {ARM::RSBSrsi, ARM::RSBrsi}, 1551 {ARM::RSBSrsr, ARM::RSBrsr}, 1552 1553 {ARM::t2ADDSri, ARM::t2ADDri}, 1554 {ARM::t2ADDSrr, ARM::t2ADDrr}, 1555 {ARM::t2ADDSrs, ARM::t2ADDrs}, 1556 1557 {ARM::t2SUBSri, ARM::t2SUBri}, 1558 {ARM::t2SUBSrr, ARM::t2SUBrr}, 1559 {ARM::t2SUBSrs, ARM::t2SUBrs}, 1560 1561 {ARM::t2RSBSri, ARM::t2RSBri}, 1562 {ARM::t2RSBSrs, ARM::t2RSBrs}, 1563}; 1564 1565unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) { 1566 for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i) 1567 if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc) 1568 return AddSubFlagsOpcodeMap[i].MachineOpc; 1569 return 0; 1570} 1571 1572void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB, 1573 MachineBasicBlock::iterator &MBBI, DebugLoc dl, 1574 unsigned DestReg, unsigned BaseReg, int NumBytes, 1575 ARMCC::CondCodes Pred, unsigned PredReg, 1576 const ARMBaseInstrInfo &TII, unsigned MIFlags) { 1577 bool isSub = NumBytes < 0; 1578 if (isSub) NumBytes = -NumBytes; 1579 1580 while (NumBytes) { 1581 unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes); 1582 unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt); 1583 assert(ThisVal && "Didn't extract field correctly"); 1584 1585 // We will handle these bits from offset, clear them. 
1586 NumBytes &= ~ThisVal; 1587 1588 assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?"); 1589 1590 // Build the new ADD / SUB. 1591 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri; 1592 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg) 1593 .addReg(BaseReg, RegState::Kill).addImm(ThisVal) 1594 .addImm((unsigned)Pred).addReg(PredReg).addReg(0) 1595 .setMIFlags(MIFlags); 1596 BaseReg = DestReg; 1597 } 1598} 1599 1600bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, 1601 unsigned FrameReg, int &Offset, 1602 const ARMBaseInstrInfo &TII) { 1603 unsigned Opcode = MI.getOpcode(); 1604 const MCInstrDesc &Desc = MI.getDesc(); 1605 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); 1606 bool isSub = false; 1607 1608 // Memory operands in inline assembly always use AddrMode2. 1609 if (Opcode == ARM::INLINEASM) 1610 AddrMode = ARMII::AddrMode2; 1611 1612 if (Opcode == ARM::ADDri) { 1613 Offset += MI.getOperand(FrameRegIdx+1).getImm(); 1614 if (Offset == 0) { 1615 // Turn it into a move. 1616 MI.setDesc(TII.get(ARM::MOVr)); 1617 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); 1618 MI.RemoveOperand(FrameRegIdx+1); 1619 Offset = 0; 1620 return true; 1621 } else if (Offset < 0) { 1622 Offset = -Offset; 1623 isSub = true; 1624 MI.setDesc(TII.get(ARM::SUBri)); 1625 } 1626 1627 // Common case: small offset, fits into instruction. 1628 if (ARM_AM::getSOImmVal(Offset) != -1) { 1629 // Replace the FrameIndex with sp / fp 1630 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); 1631 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset); 1632 Offset = 0; 1633 return true; 1634 } 1635 1636 // Otherwise, pull as much of the immedidate into this ADDri/SUBri 1637 // as possible. 1638 unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset); 1639 unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt); 1640 1641 // We will handle these bits from offset, clear them. 1642 Offset &= ~ThisImmVal; 1643 1644 // Get the properly encoded SOImmVal field. 1645 assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 && 1646 "Bit extraction didn't work?"); 1647 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal); 1648 } else { 1649 unsigned ImmIdx = 0; 1650 int InstrOffs = 0; 1651 unsigned NumBits = 0; 1652 unsigned Scale = 1; 1653 switch (AddrMode) { 1654 case ARMII::AddrMode_i12: { 1655 ImmIdx = FrameRegIdx + 1; 1656 InstrOffs = MI.getOperand(ImmIdx).getImm(); 1657 NumBits = 12; 1658 break; 1659 } 1660 case ARMII::AddrMode2: { 1661 ImmIdx = FrameRegIdx+2; 1662 InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm()); 1663 if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub) 1664 InstrOffs *= -1; 1665 NumBits = 12; 1666 break; 1667 } 1668 case ARMII::AddrMode3: { 1669 ImmIdx = FrameRegIdx+2; 1670 InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm()); 1671 if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub) 1672 InstrOffs *= -1; 1673 NumBits = 8; 1674 break; 1675 } 1676 case ARMII::AddrMode4: 1677 case ARMII::AddrMode6: 1678 // Can't fold any offset even if it's zero. 
1679 return false; 1680 case ARMII::AddrMode5: { 1681 ImmIdx = FrameRegIdx+1; 1682 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm()); 1683 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub) 1684 InstrOffs *= -1; 1685 NumBits = 8; 1686 Scale = 4; 1687 break; 1688 } 1689 default: 1690 llvm_unreachable("Unsupported addressing mode!"); 1691 } 1692 1693 Offset += InstrOffs * Scale; 1694 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!"); 1695 if (Offset < 0) { 1696 Offset = -Offset; 1697 isSub = true; 1698 } 1699 1700 // Attempt to fold address comp. if opcode has offset bits 1701 if (NumBits > 0) { 1702 // Common case: small offset, fits into instruction. 1703 MachineOperand &ImmOp = MI.getOperand(ImmIdx); 1704 int ImmedOffset = Offset / Scale; 1705 unsigned Mask = (1 << NumBits) - 1; 1706 if ((unsigned)Offset <= Mask * Scale) { 1707 // Replace the FrameIndex with sp 1708 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); 1709 // FIXME: When addrmode2 goes away, this will simplify (like the 1710 // T2 version), as the LDR.i12 versions don't need the encoding 1711 // tricks for the offset value. 1712 if (isSub) { 1713 if (AddrMode == ARMII::AddrMode_i12) 1714 ImmedOffset = -ImmedOffset; 1715 else 1716 ImmedOffset |= 1 << NumBits; 1717 } 1718 ImmOp.ChangeToImmediate(ImmedOffset); 1719 Offset = 0; 1720 return true; 1721 } 1722 1723 // Otherwise, it didn't fit. Pull in what we can to simplify the immed. 1724 ImmedOffset = ImmedOffset & Mask; 1725 if (isSub) { 1726 if (AddrMode == ARMII::AddrMode_i12) 1727 ImmedOffset = -ImmedOffset; 1728 else 1729 ImmedOffset |= 1 << NumBits; 1730 } 1731 ImmOp.ChangeToImmediate(ImmedOffset); 1732 Offset &= ~(Mask*Scale); 1733 } 1734 } 1735 1736 Offset = (isSub) ? -Offset : Offset; 1737 return Offset == 0; 1738} 1739 1740bool ARMBaseInstrInfo:: 1741AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpMask, 1742 int &CmpValue) const { 1743 switch (MI->getOpcode()) { 1744 default: break; 1745 case ARM::CMPri: 1746 case ARM::t2CMPri: 1747 SrcReg = MI->getOperand(0).getReg(); 1748 CmpMask = ~0; 1749 CmpValue = MI->getOperand(1).getImm(); 1750 return true; 1751 case ARM::CMPrr: 1752 case ARM::t2CMPrr: 1753 SrcReg = MI->getOperand(0).getReg(); 1754 CmpMask = ~0; 1755 CmpValue = 0; 1756 return true; 1757 case ARM::TSTri: 1758 case ARM::t2TSTri: 1759 SrcReg = MI->getOperand(0).getReg(); 1760 CmpMask = MI->getOperand(1).getImm(); 1761 CmpValue = 0; 1762 return true; 1763 } 1764 1765 return false; 1766} 1767 1768/// isSuitableForMask - Identify a suitable 'and' instruction that 1769/// operates on the given source register and applies the same mask 1770/// as a 'tst' instruction. Provide a limited look-through for copies. 1771/// When successful, MI will hold the found instruction. 1772static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg, 1773 int CmpMask, bool CommonUse) { 1774 switch (MI->getOpcode()) { 1775 case ARM::ANDri: 1776 case ARM::t2ANDri: 1777 if (CmpMask != MI->getOperand(2).getImm()) 1778 return false; 1779 if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg()) 1780 return true; 1781 break; 1782 case ARM::COPY: { 1783 // Walk down one instruction which is potentially an 'and'. 
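    // E.g. (illustrative, with placeholder vregs):
    //   %vreg1 = COPY %vreg0
    //   %vreg2 = ANDri %vreg1, #mask, ...
    //   ...
    //   TSTri %vreg1, #mask, ...
    // The TST source is defined by the COPY, so look at the instruction right
    // after the COPY in the hope that it is the ANDri applying the same mask.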
1784 const MachineInstr &Copy = *MI; 1785 MachineBasicBlock::iterator AND( 1786 llvm::next(MachineBasicBlock::iterator(MI))); 1787 if (AND == MI->getParent()->end()) return false; 1788 MI = AND; 1789 return isSuitableForMask(MI, Copy.getOperand(0).getReg(), 1790 CmpMask, true); 1791 } 1792 } 1793 1794 return false; 1795} 1796 1797/// OptimizeCompareInstr - Convert the instruction supplying the argument to the 1798/// comparison into one that sets the zero bit in the flags register. Convert 1799/// the SUBrr(r1,r2)|Subri(r1,CmpValue) instruction into one that sets the flags 1800/// register and remove the CMPrr(r1,r2)|CMPrr(r2,r1)|CMPri(r1,CmpValue) 1801/// instruction. 1802bool ARMBaseInstrInfo:: 1803OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask, 1804 int CmpValue, const MachineRegisterInfo *MRI) const { 1805 1806 MachineRegisterInfo::def_iterator DI = MRI->def_begin(SrcReg); 1807 if (llvm::next(DI) != MRI->def_end()) 1808 // Only support one definition. 1809 return false; 1810 1811 MachineInstr *MI = &*DI; 1812 1813 // Masked compares sometimes use the same register as the corresponding 'and'. 1814 if (CmpMask != ~0) { 1815 if (!isSuitableForMask(MI, SrcReg, CmpMask, false)) { 1816 MI = 0; 1817 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg), 1818 UE = MRI->use_end(); UI != UE; ++UI) { 1819 if (UI->getParent() != CmpInstr->getParent()) continue; 1820 MachineInstr *PotentialAND = &*UI; 1821 if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true)) 1822 continue; 1823 MI = PotentialAND; 1824 break; 1825 } 1826 if (!MI) return false; 1827 } 1828 } 1829 1830 // Get ready to iterate backward from CmpInstr. 1831 MachineBasicBlock::iterator I = CmpInstr, E = MI, 1832 B = CmpInstr->getParent()->begin(); 1833 1834 // Early exit if CmpInstr is at the beginning of the BB. 1835 if (I == B) return false; 1836 1837 // There are two possible candidates which can be changed to set CPSR: 1838 // One is MI, the other is a SUB instruction. 1839 // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1). 1840 // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue). 1841 MachineInstr *Sub = NULL; 1842 unsigned SrcReg2 = 0; 1843 if (CmpInstr->getOpcode() == ARM::CMPrr || 1844 CmpInstr->getOpcode() == ARM::t2CMPrr) { 1845 SrcReg2 = CmpInstr->getOperand(1).getReg(); 1846 // MI is not a candidate for CMPrr. 1847 MI = NULL; 1848 } else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) { 1849 // Conservatively refuse to convert an instruction which isn't in the same 1850 // BB as the comparison. 1851 // For CMPri, we need to check Sub, thus we can't return here. 1852 if (CmpInstr->getOpcode() == ARM::CMPri || 1853 CmpInstr->getOpcode() == ARM::t2CMPri) 1854 MI = NULL; 1855 else 1856 return false; 1857 } 1858 1859 // Check that CPSR isn't set between the comparison instruction and the one we 1860 // want to change. At the same time, search for Sub. 1861 --I; 1862 for (; I != E; --I) { 1863 const MachineInstr &Instr = *I; 1864 1865 for (unsigned IO = 0, EO = Instr.getNumOperands(); IO != EO; ++IO) { 1866 const MachineOperand &MO = Instr.getOperand(IO); 1867 if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) 1868 return false; 1869 if (!MO.isReg()) continue; 1870 1871 // This instruction modifies or uses CPSR after the one we want to 1872 // change. We can't do this transformation. 1873 if (MO.getReg() == ARM::CPSR) 1874 return false; 1875 } 1876 1877 // Check whether the current instruction is SUB(r1, r2) or SUB(r2, r1). 
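    // E.g. (illustrative) the pattern being targeted is:
    //   sub  r3, r1, r2
    //   ...              (nothing in between may read or clobber CPSR)
    //   cmp  r1, r2
    //   bge  LBB0_1
    // which becomes, with the cmp removed:
    //   subs r3, r1, r2
    //   ...
    //   bge  LBB0_1
    // The swapped form "sub r3, r2, r1" is also accepted, but then the
    // condition codes of the CPSR users have to be swapped as well (see below).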
1878 if (SrcReg2 != 0 && Instr.getOpcode() == ARM::SUBrr && 1879 ((Instr.getOperand(1).getReg() == SrcReg && 1880 Instr.getOperand(2).getReg() == SrcReg2) || 1881 (Instr.getOperand(1).getReg() == SrcReg2 && 1882 Instr.getOperand(2).getReg() == SrcReg))) { 1883 Sub = &*I; 1884 break; 1885 } 1886 1887 // Check whether the current instruction is SUBri(r1, CmpValue). 1888 if ((CmpInstr->getOpcode() == ARM::CMPri || 1889 CmpInstr->getOpcode() == ARM::t2CMPri) && 1890 Instr.getOpcode() == ARM::SUBri && CmpValue != 0 && 1891 Instr.getOperand(1).getReg() == SrcReg && 1892 Instr.getOperand(2).getImm() == CmpValue) { 1893 Sub = &*I; 1894 break; 1895 } 1896 1897 if (I == B) 1898 // The 'and' is below the comparison instruction. 1899 return false; 1900 } 1901 1902 // Return false if no candidates exist. 1903 if (!MI && !Sub) 1904 return false; 1905 1906 // The single candidate is called MI. 1907 if (!MI) MI = Sub; 1908 1909 switch (MI->getOpcode()) { 1910 default: break; 1911 case ARM::RSBrr: 1912 case ARM::RSBri: 1913 case ARM::RSCrr: 1914 case ARM::RSCri: 1915 case ARM::ADDrr: 1916 case ARM::ADDri: 1917 case ARM::ADCrr: 1918 case ARM::ADCri: 1919 case ARM::SUBrr: 1920 case ARM::SUBri: 1921 case ARM::SBCrr: 1922 case ARM::SBCri: 1923 case ARM::t2RSBri: 1924 case ARM::t2ADDrr: 1925 case ARM::t2ADDri: 1926 case ARM::t2ADCrr: 1927 case ARM::t2ADCri: 1928 case ARM::t2SUBrr: 1929 case ARM::t2SUBri: 1930 case ARM::t2SBCrr: 1931 case ARM::t2SBCri: 1932 case ARM::ANDrr: 1933 case ARM::ANDri: 1934 case ARM::t2ANDrr: 1935 case ARM::t2ANDri: 1936 case ARM::ORRrr: 1937 case ARM::ORRri: 1938 case ARM::t2ORRrr: 1939 case ARM::t2ORRri: 1940 case ARM::EORrr: 1941 case ARM::EORri: 1942 case ARM::t2EORrr: 1943 case ARM::t2EORri: { 1944 // Scan forward for the use of CPSR 1945 // When checking against MI: if it's a conditional code requires 1946 // checking of V bit, then this is not safe to do. If we can't find the 1947 // CPSR use (i.e. used in another block), then it's not safe to perform 1948 // the optimization. 1949 // When checking against Sub, we handle the condition codes GE, LT, GT, LE. 1950 SmallVector<MachineOperand*, 4> OperandsToUpdate; 1951 bool isSafe = false; 1952 I = CmpInstr; 1953 E = CmpInstr->getParent()->end(); 1954 while (!isSafe && ++I != E) { 1955 const MachineInstr &Instr = *I; 1956 for (unsigned IO = 0, EO = Instr.getNumOperands(); 1957 !isSafe && IO != EO; ++IO) { 1958 const MachineOperand &MO = Instr.getOperand(IO); 1959 if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) { 1960 isSafe = true; 1961 break; 1962 } 1963 if (!MO.isReg() || MO.getReg() != ARM::CPSR) 1964 continue; 1965 if (MO.isDef()) { 1966 isSafe = true; 1967 break; 1968 } 1969 // Condition code is after the operand before CPSR. 1970 ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm(); 1971 if (Sub) 1972 switch (CC) { 1973 default: 1974 return false; 1975 case ARMCC::GE: 1976 case ARMCC::LT: 1977 case ARMCC::GT: 1978 case ARMCC::LE: 1979 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based 1980 // on CMP needs to be updated to be based on SUB. 1981 // Push the condition code operands to OperandsToUpdate. 1982 // If it is safe to remove CmpInstr, the condition code of these 1983 // operands will be modified. 
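        // E.g. (illustrative): for "cmp r1, r2" paired with "sub r0, r2, r1",
        // a user such as "movge" has to become "movle" once the sub is turned
        // into "subs" and the cmp is removed (GE<->LE, GT<->LT).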
1984             if (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
1985                 Sub->getOperand(2).getReg() == SrcReg)
1986               OperandsToUpdate.push_back(&((*I).getOperand(IO-1)));
1987             break;
1988           }
1989         else
1990           switch (CC) {
1991           default:
1992             isSafe = true;
1993             break;
1994           case ARMCC::VS:
1995           case ARMCC::VC:
1996           case ARMCC::GE:
1997           case ARMCC::LT:
1998           case ARMCC::GT:
1999           case ARMCC::LE:
2000             return false;
2001           }
2002       }
2003     }
2004 
2005     // If the candidate is Sub, we may exit the loop at the end of the basic block.
2006     // In that case, it is still safe to remove CmpInstr.
2007     if (!isSafe && !Sub)
2008       return false;
2009 
2010     // Toggle the optional operand to CPSR.
2011     MI->getOperand(5).setReg(ARM::CPSR);
2012     MI->getOperand(5).setIsDef(true);
2013     CmpInstr->eraseFromParent();
2014 
2015     // Modify the condition code of operands in OperandsToUpdate.
2016     // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
2017     // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
2018     for (unsigned i = 0; i < OperandsToUpdate.size(); i++) {
2019       ARMCC::CondCodes CC = (ARMCC::CondCodes)OperandsToUpdate[i]->getImm();
2020       ARMCC::CondCodes NewCC;
2021       switch (CC) {
2022       default: llvm_unreachable("only expecting less/greater comparisons here");
2023       case ARMCC::GE: NewCC = ARMCC::LE; break;
2024       case ARMCC::LT: NewCC = ARMCC::GT; break;
2025       case ARMCC::GT: NewCC = ARMCC::LT; break;
2026       case ARMCC::LE: NewCC = ARMCC::GE; break;
2027       }
2028       OperandsToUpdate[i]->setImm(NewCC);
2029     }
2030     return true;
2031   }
2032   }
2033 
2034   return false;
2035 }
2036 
2037 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
2038                                      MachineInstr *DefMI, unsigned Reg,
2039                                      MachineRegisterInfo *MRI) const {
2040   // Fold large immediates into add, sub, or, xor.
2041   unsigned DefOpc = DefMI->getOpcode();
2042   if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
2043     return false;
2044   if (!DefMI->getOperand(1).isImm())
2045     // Could be t2MOVi32imm <ga:xx>
2046     return false;
2047 
2048   if (!MRI->hasOneNonDBGUse(Reg))
2049     return false;
2050 
2051   const MCInstrDesc &DefMCID = DefMI->getDesc();
2052   if (DefMCID.hasOptionalDef()) {
2053     unsigned NumOps = DefMCID.getNumOperands();
2054     const MachineOperand &MO = DefMI->getOperand(NumOps-1);
2055     if (MO.getReg() == ARM::CPSR && !MO.isDead())
2056       // If DefMI defines CPSR and it is not dead, it's obviously not safe
2057       // to delete DefMI.
2058       return false;
2059   }
2060 
2061   const MCInstrDesc &UseMCID = UseMI->getDesc();
2062   if (UseMCID.hasOptionalDef()) {
2063     unsigned NumOps = UseMCID.getNumOperands();
2064     if (UseMI->getOperand(NumOps-1).getReg() == ARM::CPSR)
2065       // If the instruction sets the flag, do not attempt this optimization
2066       // since it may change the semantics of the code.
2067 return false; 2068 } 2069 2070 unsigned UseOpc = UseMI->getOpcode(); 2071 unsigned NewUseOpc = 0; 2072 uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm(); 2073 uint32_t SOImmValV1 = 0, SOImmValV2 = 0; 2074 bool Commute = false; 2075 switch (UseOpc) { 2076 default: return false; 2077 case ARM::SUBrr: 2078 case ARM::ADDrr: 2079 case ARM::ORRrr: 2080 case ARM::EORrr: 2081 case ARM::t2SUBrr: 2082 case ARM::t2ADDrr: 2083 case ARM::t2ORRrr: 2084 case ARM::t2EORrr: { 2085 Commute = UseMI->getOperand(2).getReg() != Reg; 2086 switch (UseOpc) { 2087 default: break; 2088 case ARM::SUBrr: { 2089 if (Commute) 2090 return false; 2091 ImmVal = -ImmVal; 2092 NewUseOpc = ARM::SUBri; 2093 // Fallthrough 2094 } 2095 case ARM::ADDrr: 2096 case ARM::ORRrr: 2097 case ARM::EORrr: { 2098 if (!ARM_AM::isSOImmTwoPartVal(ImmVal)) 2099 return false; 2100 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal); 2101 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal); 2102 switch (UseOpc) { 2103 default: break; 2104 case ARM::ADDrr: NewUseOpc = ARM::ADDri; break; 2105 case ARM::ORRrr: NewUseOpc = ARM::ORRri; break; 2106 case ARM::EORrr: NewUseOpc = ARM::EORri; break; 2107 } 2108 break; 2109 } 2110 case ARM::t2SUBrr: { 2111 if (Commute) 2112 return false; 2113 ImmVal = -ImmVal; 2114 NewUseOpc = ARM::t2SUBri; 2115 // Fallthrough 2116 } 2117 case ARM::t2ADDrr: 2118 case ARM::t2ORRrr: 2119 case ARM::t2EORrr: { 2120 if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal)) 2121 return false; 2122 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal); 2123 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal); 2124 switch (UseOpc) { 2125 default: break; 2126 case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break; 2127 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break; 2128 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break; 2129 } 2130 break; 2131 } 2132 } 2133 } 2134 } 2135 2136 unsigned OpIdx = Commute ? 2 : 1; 2137 unsigned Reg1 = UseMI->getOperand(OpIdx).getReg(); 2138 bool isKill = UseMI->getOperand(OpIdx).isKill(); 2139 unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg)); 2140 AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(), 2141 UseMI, UseMI->getDebugLoc(), 2142 get(NewUseOpc), NewReg) 2143 .addReg(Reg1, getKillRegState(isKill)) 2144 .addImm(SOImmValV1))); 2145 UseMI->setDesc(get(NewUseOpc)); 2146 UseMI->getOperand(1).setReg(NewReg); 2147 UseMI->getOperand(1).setIsKill(); 2148 UseMI->getOperand(2).ChangeToImmediate(SOImmValV2); 2149 DefMI->eraseFromParent(); 2150 return true; 2151} 2152 2153unsigned 2154ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, 2155 const MachineInstr *MI) const { 2156 if (!ItinData || ItinData->isEmpty()) 2157 return 1; 2158 2159 const MCInstrDesc &Desc = MI->getDesc(); 2160 unsigned Class = Desc.getSchedClass(); 2161 unsigned UOps = ItinData->Itineraries[Class].NumMicroOps; 2162 if (UOps) 2163 return UOps; 2164 2165 unsigned Opc = MI->getOpcode(); 2166 switch (Opc) { 2167 default: 2168 llvm_unreachable("Unexpected multi-uops instruction!"); 2169 case ARM::VLDMQIA: 2170 case ARM::VSTMQIA: 2171 return 2; 2172 2173 // The number of uOps for load / store multiple are determined by the number 2174 // registers. 2175 // 2176 // On Cortex-A8, each pair of register loads / stores can be scheduled on the 2177 // same cycle. The scheduling for the first load / store must be done 2178 // separately by assuming the the address is not 64-bit aligned. 2179 // 2180 // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). 
If the address 2181 // is not 64-bit aligned, then AGU would take an extra cycle. For VFP / NEON 2182 // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1. 2183 case ARM::VLDMDIA: 2184 case ARM::VLDMDIA_UPD: 2185 case ARM::VLDMDDB_UPD: 2186 case ARM::VLDMSIA: 2187 case ARM::VLDMSIA_UPD: 2188 case ARM::VLDMSDB_UPD: 2189 case ARM::VSTMDIA: 2190 case ARM::VSTMDIA_UPD: 2191 case ARM::VSTMDDB_UPD: 2192 case ARM::VSTMSIA: 2193 case ARM::VSTMSIA_UPD: 2194 case ARM::VSTMSDB_UPD: { 2195 unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands(); 2196 return (NumRegs / 2) + (NumRegs % 2) + 1; 2197 } 2198 2199 case ARM::LDMIA_RET: 2200 case ARM::LDMIA: 2201 case ARM::LDMDA: 2202 case ARM::LDMDB: 2203 case ARM::LDMIB: 2204 case ARM::LDMIA_UPD: 2205 case ARM::LDMDA_UPD: 2206 case ARM::LDMDB_UPD: 2207 case ARM::LDMIB_UPD: 2208 case ARM::STMIA: 2209 case ARM::STMDA: 2210 case ARM::STMDB: 2211 case ARM::STMIB: 2212 case ARM::STMIA_UPD: 2213 case ARM::STMDA_UPD: 2214 case ARM::STMDB_UPD: 2215 case ARM::STMIB_UPD: 2216 case ARM::tLDMIA: 2217 case ARM::tLDMIA_UPD: 2218 case ARM::tSTMIA_UPD: 2219 case ARM::tPOP_RET: 2220 case ARM::tPOP: 2221 case ARM::tPUSH: 2222 case ARM::t2LDMIA_RET: 2223 case ARM::t2LDMIA: 2224 case ARM::t2LDMDB: 2225 case ARM::t2LDMIA_UPD: 2226 case ARM::t2LDMDB_UPD: 2227 case ARM::t2STMIA: 2228 case ARM::t2STMDB: 2229 case ARM::t2STMIA_UPD: 2230 case ARM::t2STMDB_UPD: { 2231 unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1; 2232 if (Subtarget.isCortexA8()) { 2233 if (NumRegs < 4) 2234 return 2; 2235 // 4 registers would be issued: 2, 2. 2236 // 5 registers would be issued: 2, 2, 1. 2237 UOps = (NumRegs / 2); 2238 if (NumRegs % 2) 2239 ++UOps; 2240 return UOps; 2241 } else if (Subtarget.isCortexA9()) { 2242 UOps = (NumRegs / 2); 2243 // If there are odd number of registers or if it's not 64-bit aligned, 2244 // then it takes an extra AGU (Address Generation Unit) cycle. 2245 if ((NumRegs % 2) || 2246 !MI->hasOneMemOperand() || 2247 (*MI->memoperands_begin())->getAlignment() < 8) 2248 ++UOps; 2249 return UOps; 2250 } else { 2251 // Assume the worst. 2252 return NumRegs; 2253 } 2254 } 2255 } 2256} 2257 2258int 2259ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, 2260 const MCInstrDesc &DefMCID, 2261 unsigned DefClass, 2262 unsigned DefIdx, unsigned DefAlign) const { 2263 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1; 2264 if (RegNo <= 0) 2265 // Def is the address writeback. 2266 return ItinData->getOperandCycle(DefClass, DefIdx); 2267 2268 int DefCycle; 2269 if (Subtarget.isCortexA8()) { 2270 // (regno / 2) + (regno % 2) + 1 2271 DefCycle = RegNo / 2 + 1; 2272 if (RegNo % 2) 2273 ++DefCycle; 2274 } else if (Subtarget.isCortexA9()) { 2275 DefCycle = RegNo; 2276 bool isSLoad = false; 2277 2278 switch (DefMCID.getOpcode()) { 2279 default: break; 2280 case ARM::VLDMSIA: 2281 case ARM::VLDMSIA_UPD: 2282 case ARM::VLDMSDB_UPD: 2283 isSLoad = true; 2284 break; 2285 } 2286 2287 // If there are odd number of 'S' registers or if it's not 64-bit aligned, 2288 // then it takes an extra cycle. 2289 if ((isSLoad && (RegNo % 2)) || DefAlign < 8) 2290 ++DefCycle; 2291 } else { 2292 // Assume the worst. 
2293 DefCycle = RegNo + 2; 2294 } 2295 2296 return DefCycle; 2297} 2298 2299int 2300ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData, 2301 const MCInstrDesc &DefMCID, 2302 unsigned DefClass, 2303 unsigned DefIdx, unsigned DefAlign) const { 2304 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1; 2305 if (RegNo <= 0) 2306 // Def is the address writeback. 2307 return ItinData->getOperandCycle(DefClass, DefIdx); 2308 2309 int DefCycle; 2310 if (Subtarget.isCortexA8()) { 2311 // 4 registers would be issued: 1, 2, 1. 2312 // 5 registers would be issued: 1, 2, 2. 2313 DefCycle = RegNo / 2; 2314 if (DefCycle < 1) 2315 DefCycle = 1; 2316 // Result latency is issue cycle + 2: E2. 2317 DefCycle += 2; 2318 } else if (Subtarget.isCortexA9()) { 2319 DefCycle = (RegNo / 2); 2320 // If there are odd number of registers or if it's not 64-bit aligned, 2321 // then it takes an extra AGU (Address Generation Unit) cycle. 2322 if ((RegNo % 2) || DefAlign < 8) 2323 ++DefCycle; 2324 // Result latency is AGU cycles + 2. 2325 DefCycle += 2; 2326 } else { 2327 // Assume the worst. 2328 DefCycle = RegNo + 2; 2329 } 2330 2331 return DefCycle; 2332} 2333 2334int 2335ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, 2336 const MCInstrDesc &UseMCID, 2337 unsigned UseClass, 2338 unsigned UseIdx, unsigned UseAlign) const { 2339 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1; 2340 if (RegNo <= 0) 2341 return ItinData->getOperandCycle(UseClass, UseIdx); 2342 2343 int UseCycle; 2344 if (Subtarget.isCortexA8()) { 2345 // (regno / 2) + (regno % 2) + 1 2346 UseCycle = RegNo / 2 + 1; 2347 if (RegNo % 2) 2348 ++UseCycle; 2349 } else if (Subtarget.isCortexA9()) { 2350 UseCycle = RegNo; 2351 bool isSStore = false; 2352 2353 switch (UseMCID.getOpcode()) { 2354 default: break; 2355 case ARM::VSTMSIA: 2356 case ARM::VSTMSIA_UPD: 2357 case ARM::VSTMSDB_UPD: 2358 isSStore = true; 2359 break; 2360 } 2361 2362 // If there are odd number of 'S' registers or if it's not 64-bit aligned, 2363 // then it takes an extra cycle. 2364 if ((isSStore && (RegNo % 2)) || UseAlign < 8) 2365 ++UseCycle; 2366 } else { 2367 // Assume the worst. 2368 UseCycle = RegNo + 2; 2369 } 2370 2371 return UseCycle; 2372} 2373 2374int 2375ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData, 2376 const MCInstrDesc &UseMCID, 2377 unsigned UseClass, 2378 unsigned UseIdx, unsigned UseAlign) const { 2379 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1; 2380 if (RegNo <= 0) 2381 return ItinData->getOperandCycle(UseClass, UseIdx); 2382 2383 int UseCycle; 2384 if (Subtarget.isCortexA8()) { 2385 UseCycle = RegNo / 2; 2386 if (UseCycle < 2) 2387 UseCycle = 2; 2388 // Read in E3. 2389 UseCycle += 2; 2390 } else if (Subtarget.isCortexA9()) { 2391 UseCycle = (RegNo / 2); 2392 // If there are odd number of registers or if it's not 64-bit aligned, 2393 // then it takes an extra AGU (Address Generation Unit) cycle. 2394 if ((RegNo % 2) || UseAlign < 8) 2395 ++UseCycle; 2396 } else { 2397 // Assume the worst. 
2398 UseCycle = 1; 2399 } 2400 return UseCycle; 2401} 2402 2403int 2404ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 2405 const MCInstrDesc &DefMCID, 2406 unsigned DefIdx, unsigned DefAlign, 2407 const MCInstrDesc &UseMCID, 2408 unsigned UseIdx, unsigned UseAlign) const { 2409 unsigned DefClass = DefMCID.getSchedClass(); 2410 unsigned UseClass = UseMCID.getSchedClass(); 2411 2412 if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands()) 2413 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx); 2414 2415 // This may be a def / use of a variable_ops instruction, the operand 2416 // latency might be determinable dynamically. Let the target try to 2417 // figure it out. 2418 int DefCycle = -1; 2419 bool LdmBypass = false; 2420 switch (DefMCID.getOpcode()) { 2421 default: 2422 DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); 2423 break; 2424 2425 case ARM::VLDMDIA: 2426 case ARM::VLDMDIA_UPD: 2427 case ARM::VLDMDDB_UPD: 2428 case ARM::VLDMSIA: 2429 case ARM::VLDMSIA_UPD: 2430 case ARM::VLDMSDB_UPD: 2431 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); 2432 break; 2433 2434 case ARM::LDMIA_RET: 2435 case ARM::LDMIA: 2436 case ARM::LDMDA: 2437 case ARM::LDMDB: 2438 case ARM::LDMIB: 2439 case ARM::LDMIA_UPD: 2440 case ARM::LDMDA_UPD: 2441 case ARM::LDMDB_UPD: 2442 case ARM::LDMIB_UPD: 2443 case ARM::tLDMIA: 2444 case ARM::tLDMIA_UPD: 2445 case ARM::tPUSH: 2446 case ARM::t2LDMIA_RET: 2447 case ARM::t2LDMIA: 2448 case ARM::t2LDMDB: 2449 case ARM::t2LDMIA_UPD: 2450 case ARM::t2LDMDB_UPD: 2451 LdmBypass = 1; 2452 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); 2453 break; 2454 } 2455 2456 if (DefCycle == -1) 2457 // We can't seem to determine the result latency of the def, assume it's 2. 2458 DefCycle = 2; 2459 2460 int UseCycle = -1; 2461 switch (UseMCID.getOpcode()) { 2462 default: 2463 UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); 2464 break; 2465 2466 case ARM::VSTMDIA: 2467 case ARM::VSTMDIA_UPD: 2468 case ARM::VSTMDDB_UPD: 2469 case ARM::VSTMSIA: 2470 case ARM::VSTMSIA_UPD: 2471 case ARM::VSTMSDB_UPD: 2472 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 2473 break; 2474 2475 case ARM::STMIA: 2476 case ARM::STMDA: 2477 case ARM::STMDB: 2478 case ARM::STMIB: 2479 case ARM::STMIA_UPD: 2480 case ARM::STMDA_UPD: 2481 case ARM::STMDB_UPD: 2482 case ARM::STMIB_UPD: 2483 case ARM::tSTMIA_UPD: 2484 case ARM::tPOP_RET: 2485 case ARM::tPOP: 2486 case ARM::t2STMIA: 2487 case ARM::t2STMDB: 2488 case ARM::t2STMIA_UPD: 2489 case ARM::t2STMDB_UPD: 2490 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 2491 break; 2492 } 2493 2494 if (UseCycle == -1) 2495 // Assume it's read in the first stage. 2496 UseCycle = 1; 2497 2498 UseCycle = DefCycle - UseCycle + 1; 2499 if (UseCycle > 0) { 2500 if (LdmBypass) { 2501 // It's a variable_ops instruction so we can't use DefIdx here. Just use 2502 // first def operand. 
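      // If the itinerary records a forwarding path (bypass) from that def to
      // this use, the loaded value is available one cycle earlier.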
2503 if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1, 2504 UseClass, UseIdx)) 2505 --UseCycle; 2506 } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx, 2507 UseClass, UseIdx)) { 2508 --UseCycle; 2509 } 2510 } 2511 2512 return UseCycle; 2513} 2514 2515static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI, 2516 const MachineInstr *MI, unsigned Reg, 2517 unsigned &DefIdx, unsigned &Dist) { 2518 Dist = 0; 2519 2520 MachineBasicBlock::const_iterator I = MI; ++I; 2521 MachineBasicBlock::const_instr_iterator II = 2522 llvm::prior(I.getInstrIterator()); 2523 assert(II->isInsideBundle() && "Empty bundle?"); 2524 2525 int Idx = -1; 2526 while (II->isInsideBundle()) { 2527 Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI); 2528 if (Idx != -1) 2529 break; 2530 --II; 2531 ++Dist; 2532 } 2533 2534 assert(Idx != -1 && "Cannot find bundled definition!"); 2535 DefIdx = Idx; 2536 return II; 2537} 2538 2539static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI, 2540 const MachineInstr *MI, unsigned Reg, 2541 unsigned &UseIdx, unsigned &Dist) { 2542 Dist = 0; 2543 2544 MachineBasicBlock::const_instr_iterator II = MI; ++II; 2545 assert(II->isInsideBundle() && "Empty bundle?"); 2546 MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end(); 2547 2548 // FIXME: This doesn't properly handle multiple uses. 2549 int Idx = -1; 2550 while (II != E && II->isInsideBundle()) { 2551 Idx = II->findRegisterUseOperandIdx(Reg, false, TRI); 2552 if (Idx != -1) 2553 break; 2554 if (II->getOpcode() != ARM::t2IT) 2555 ++Dist; 2556 ++II; 2557 } 2558 2559 if (Idx == -1) { 2560 Dist = 0; 2561 return 0; 2562 } 2563 2564 UseIdx = Idx; 2565 return II; 2566} 2567 2568int 2569ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 2570 const MachineInstr *DefMI, unsigned DefIdx, 2571 const MachineInstr *UseMI, 2572 unsigned UseIdx) const { 2573 if (DefMI->isCopyLike() || DefMI->isInsertSubreg() || 2574 DefMI->isRegSequence() || DefMI->isImplicitDef()) { 2575 return 1; 2576 } 2577 if (!ItinData || ItinData->isEmpty()) 2578 return DefMI->mayLoad() ? 3 : 1; 2579 2580 const MCInstrDesc *DefMCID = &DefMI->getDesc(); 2581 const MCInstrDesc *UseMCID = &UseMI->getDesc(); 2582 const MachineOperand &DefMO = DefMI->getOperand(DefIdx); 2583 unsigned Reg = DefMO.getReg(); 2584 if (Reg == ARM::CPSR) { 2585 if (DefMI->getOpcode() == ARM::FMSTAT) { 2586 // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?) 2587 return Subtarget.isCortexA9() ? 1 : 20; 2588 } 2589 2590 // CPSR set and branch can be paired in the same cycle. 2591 if (UseMI->isBranch()) 2592 return 0; 2593 2594 // Otherwise it takes the instruction latency (generally one). 2595 unsigned Latency = getInstrLatency(ItinData, DefMI); 2596 2597 // For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to 2598 // its uses. Instructions which are otherwise scheduled between them may 2599 // incur a code size penalty (not able to use the CPSR setting 16-bit 2600 // instructions). 2601 if (Latency > 0 && Subtarget.isThumb2()) { 2602 const MachineFunction *MF = DefMI->getParent()->getParent(); 2603 if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize)) 2604 --Latency; 2605 } 2606 return Latency; 2607 } 2608 2609 unsigned DefAlign = DefMI->hasOneMemOperand() 2610 ? (*DefMI->memoperands_begin())->getAlignment() : 0; 2611 unsigned UseAlign = UseMI->hasOneMemOperand() 2612 ? 
(*UseMI->memoperands_begin())->getAlignment() : 0; 2613 2614 unsigned DefAdj = 0; 2615 if (DefMI->isBundle()) { 2616 DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj); 2617 if (DefMI->isCopyLike() || DefMI->isInsertSubreg() || 2618 DefMI->isRegSequence() || DefMI->isImplicitDef()) 2619 return 1; 2620 DefMCID = &DefMI->getDesc(); 2621 } 2622 unsigned UseAdj = 0; 2623 if (UseMI->isBundle()) { 2624 unsigned NewUseIdx; 2625 const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI, 2626 Reg, NewUseIdx, UseAdj); 2627 if (NewUseMI) { 2628 UseMI = NewUseMI; 2629 UseIdx = NewUseIdx; 2630 UseMCID = &UseMI->getDesc(); 2631 } 2632 } 2633 2634 int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign, 2635 *UseMCID, UseIdx, UseAlign); 2636 int Adj = DefAdj + UseAdj; 2637 if (Adj) { 2638 Latency -= (int)(DefAdj + UseAdj); 2639 if (Latency < 1) 2640 return 1; 2641 } 2642 2643 if (Latency > 1 && 2644 (Subtarget.isCortexA8() || Subtarget.isCortexA9())) { 2645 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 2646 // variants are one cycle cheaper. 2647 switch (DefMCID->getOpcode()) { 2648 default: break; 2649 case ARM::LDRrs: 2650 case ARM::LDRBrs: { 2651 unsigned ShOpVal = DefMI->getOperand(3).getImm(); 2652 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 2653 if (ShImm == 0 || 2654 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 2655 --Latency; 2656 break; 2657 } 2658 case ARM::t2LDRs: 2659 case ARM::t2LDRBs: 2660 case ARM::t2LDRHs: 2661 case ARM::t2LDRSHs: { 2662 // Thumb2 mode: lsl only. 2663 unsigned ShAmt = DefMI->getOperand(3).getImm(); 2664 if (ShAmt == 0 || ShAmt == 2) 2665 --Latency; 2666 break; 2667 } 2668 } 2669 } 2670 2671 if (DefAlign < 8 && Subtarget.isCortexA9()) 2672 switch (DefMCID->getOpcode()) { 2673 default: break; 2674 case ARM::VLD1q8: 2675 case ARM::VLD1q16: 2676 case ARM::VLD1q32: 2677 case ARM::VLD1q64: 2678 case ARM::VLD1q8wb_fixed: 2679 case ARM::VLD1q16wb_fixed: 2680 case ARM::VLD1q32wb_fixed: 2681 case ARM::VLD1q64wb_fixed: 2682 case ARM::VLD1q8wb_register: 2683 case ARM::VLD1q16wb_register: 2684 case ARM::VLD1q32wb_register: 2685 case ARM::VLD1q64wb_register: 2686 case ARM::VLD2d8: 2687 case ARM::VLD2d16: 2688 case ARM::VLD2d32: 2689 case ARM::VLD2q8: 2690 case ARM::VLD2q16: 2691 case ARM::VLD2q32: 2692 case ARM::VLD2d8wb_fixed: 2693 case ARM::VLD2d16wb_fixed: 2694 case ARM::VLD2d32wb_fixed: 2695 case ARM::VLD2q8wb_fixed: 2696 case ARM::VLD2q16wb_fixed: 2697 case ARM::VLD2q32wb_fixed: 2698 case ARM::VLD2d8wb_register: 2699 case ARM::VLD2d16wb_register: 2700 case ARM::VLD2d32wb_register: 2701 case ARM::VLD2q8wb_register: 2702 case ARM::VLD2q16wb_register: 2703 case ARM::VLD2q32wb_register: 2704 case ARM::VLD3d8: 2705 case ARM::VLD3d16: 2706 case ARM::VLD3d32: 2707 case ARM::VLD1d64T: 2708 case ARM::VLD3d8_UPD: 2709 case ARM::VLD3d16_UPD: 2710 case ARM::VLD3d32_UPD: 2711 case ARM::VLD1d64Twb_fixed: 2712 case ARM::VLD1d64Twb_register: 2713 case ARM::VLD3q8_UPD: 2714 case ARM::VLD3q16_UPD: 2715 case ARM::VLD3q32_UPD: 2716 case ARM::VLD4d8: 2717 case ARM::VLD4d16: 2718 case ARM::VLD4d32: 2719 case ARM::VLD1d64Q: 2720 case ARM::VLD4d8_UPD: 2721 case ARM::VLD4d16_UPD: 2722 case ARM::VLD4d32_UPD: 2723 case ARM::VLD1d64Qwb_fixed: 2724 case ARM::VLD1d64Qwb_register: 2725 case ARM::VLD4q8_UPD: 2726 case ARM::VLD4q16_UPD: 2727 case ARM::VLD4q32_UPD: 2728 case ARM::VLD1DUPq8: 2729 case ARM::VLD1DUPq16: 2730 case ARM::VLD1DUPq32: 2731 case ARM::VLD1DUPq8wb_fixed: 2732 case ARM::VLD1DUPq16wb_fixed: 2733 case 
ARM::VLD1DUPq32wb_fixed: 2734 case ARM::VLD1DUPq8wb_register: 2735 case ARM::VLD1DUPq16wb_register: 2736 case ARM::VLD1DUPq32wb_register: 2737 case ARM::VLD2DUPd8: 2738 case ARM::VLD2DUPd16: 2739 case ARM::VLD2DUPd32: 2740 case ARM::VLD2DUPd8wb_fixed: 2741 case ARM::VLD2DUPd16wb_fixed: 2742 case ARM::VLD2DUPd32wb_fixed: 2743 case ARM::VLD2DUPd8wb_register: 2744 case ARM::VLD2DUPd16wb_register: 2745 case ARM::VLD2DUPd32wb_register: 2746 case ARM::VLD4DUPd8: 2747 case ARM::VLD4DUPd16: 2748 case ARM::VLD4DUPd32: 2749 case ARM::VLD4DUPd8_UPD: 2750 case ARM::VLD4DUPd16_UPD: 2751 case ARM::VLD4DUPd32_UPD: 2752 case ARM::VLD1LNd8: 2753 case ARM::VLD1LNd16: 2754 case ARM::VLD1LNd32: 2755 case ARM::VLD1LNd8_UPD: 2756 case ARM::VLD1LNd16_UPD: 2757 case ARM::VLD1LNd32_UPD: 2758 case ARM::VLD2LNd8: 2759 case ARM::VLD2LNd16: 2760 case ARM::VLD2LNd32: 2761 case ARM::VLD2LNq16: 2762 case ARM::VLD2LNq32: 2763 case ARM::VLD2LNd8_UPD: 2764 case ARM::VLD2LNd16_UPD: 2765 case ARM::VLD2LNd32_UPD: 2766 case ARM::VLD2LNq16_UPD: 2767 case ARM::VLD2LNq32_UPD: 2768 case ARM::VLD4LNd8: 2769 case ARM::VLD4LNd16: 2770 case ARM::VLD4LNd32: 2771 case ARM::VLD4LNq16: 2772 case ARM::VLD4LNq32: 2773 case ARM::VLD4LNd8_UPD: 2774 case ARM::VLD4LNd16_UPD: 2775 case ARM::VLD4LNd32_UPD: 2776 case ARM::VLD4LNq16_UPD: 2777 case ARM::VLD4LNq32_UPD: 2778 // If the address is not 64-bit aligned, the latencies of these 2779 // instructions increases by one. 2780 ++Latency; 2781 break; 2782 } 2783 2784 return Latency; 2785} 2786 2787int 2788ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 2789 SDNode *DefNode, unsigned DefIdx, 2790 SDNode *UseNode, unsigned UseIdx) const { 2791 if (!DefNode->isMachineOpcode()) 2792 return 1; 2793 2794 const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode()); 2795 2796 if (isZeroCost(DefMCID.Opcode)) 2797 return 0; 2798 2799 if (!ItinData || ItinData->isEmpty()) 2800 return DefMCID.mayLoad() ? 3 : 1; 2801 2802 if (!UseNode->isMachineOpcode()) { 2803 int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx); 2804 if (Subtarget.isCortexA9()) 2805 return Latency <= 2 ? 1 : Latency - 1; 2806 else 2807 return Latency <= 3 ? 1 : Latency - 2; 2808 } 2809 2810 const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode()); 2811 const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode); 2812 unsigned DefAlign = !DefMN->memoperands_empty() 2813 ? (*DefMN->memoperands_begin())->getAlignment() : 0; 2814 const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode); 2815 unsigned UseAlign = !UseMN->memoperands_empty() 2816 ? (*UseMN->memoperands_begin())->getAlignment() : 0; 2817 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, 2818 UseMCID, UseIdx, UseAlign); 2819 2820 if (Latency > 1 && 2821 (Subtarget.isCortexA8() || Subtarget.isCortexA9())) { 2822 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 2823 // variants are one cycle cheaper. 2824 switch (DefMCID.getOpcode()) { 2825 default: break; 2826 case ARM::LDRrs: 2827 case ARM::LDRBrs: { 2828 unsigned ShOpVal = 2829 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 2830 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 2831 if (ShImm == 0 || 2832 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 2833 --Latency; 2834 break; 2835 } 2836 case ARM::t2LDRs: 2837 case ARM::t2LDRBs: 2838 case ARM::t2LDRHs: 2839 case ARM::t2LDRSHs: { 2840 // Thumb2 mode: lsl only. 
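      // E.g. (illustrative): per this model "ldr.w r0, [r1, r2]" and
      // "ldr.w r0, [r1, r2, lsl #2]" are one cycle faster than
      // "ldr.w r0, [r1, r2, lsl #1]" or "lsl #3" on Cortex-A8 / Cortex-A9.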
2841 unsigned ShAmt = 2842 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 2843 if (ShAmt == 0 || ShAmt == 2) 2844 --Latency; 2845 break; 2846 } 2847 } 2848 } 2849 2850 if (DefAlign < 8 && Subtarget.isCortexA9()) 2851 switch (DefMCID.getOpcode()) { 2852 default: break; 2853 case ARM::VLD1q8: 2854 case ARM::VLD1q16: 2855 case ARM::VLD1q32: 2856 case ARM::VLD1q64: 2857 case ARM::VLD1q8wb_register: 2858 case ARM::VLD1q16wb_register: 2859 case ARM::VLD1q32wb_register: 2860 case ARM::VLD1q64wb_register: 2861 case ARM::VLD1q8wb_fixed: 2862 case ARM::VLD1q16wb_fixed: 2863 case ARM::VLD1q32wb_fixed: 2864 case ARM::VLD1q64wb_fixed: 2865 case ARM::VLD2d8: 2866 case ARM::VLD2d16: 2867 case ARM::VLD2d32: 2868 case ARM::VLD2q8Pseudo: 2869 case ARM::VLD2q16Pseudo: 2870 case ARM::VLD2q32Pseudo: 2871 case ARM::VLD2d8wb_fixed: 2872 case ARM::VLD2d16wb_fixed: 2873 case ARM::VLD2d32wb_fixed: 2874 case ARM::VLD2q8PseudoWB_fixed: 2875 case ARM::VLD2q16PseudoWB_fixed: 2876 case ARM::VLD2q32PseudoWB_fixed: 2877 case ARM::VLD2d8wb_register: 2878 case ARM::VLD2d16wb_register: 2879 case ARM::VLD2d32wb_register: 2880 case ARM::VLD2q8PseudoWB_register: 2881 case ARM::VLD2q16PseudoWB_register: 2882 case ARM::VLD2q32PseudoWB_register: 2883 case ARM::VLD3d8Pseudo: 2884 case ARM::VLD3d16Pseudo: 2885 case ARM::VLD3d32Pseudo: 2886 case ARM::VLD1d64TPseudo: 2887 case ARM::VLD3d8Pseudo_UPD: 2888 case ARM::VLD3d16Pseudo_UPD: 2889 case ARM::VLD3d32Pseudo_UPD: 2890 case ARM::VLD3q8Pseudo_UPD: 2891 case ARM::VLD3q16Pseudo_UPD: 2892 case ARM::VLD3q32Pseudo_UPD: 2893 case ARM::VLD3q8oddPseudo: 2894 case ARM::VLD3q16oddPseudo: 2895 case ARM::VLD3q32oddPseudo: 2896 case ARM::VLD3q8oddPseudo_UPD: 2897 case ARM::VLD3q16oddPseudo_UPD: 2898 case ARM::VLD3q32oddPseudo_UPD: 2899 case ARM::VLD4d8Pseudo: 2900 case ARM::VLD4d16Pseudo: 2901 case ARM::VLD4d32Pseudo: 2902 case ARM::VLD1d64QPseudo: 2903 case ARM::VLD4d8Pseudo_UPD: 2904 case ARM::VLD4d16Pseudo_UPD: 2905 case ARM::VLD4d32Pseudo_UPD: 2906 case ARM::VLD4q8Pseudo_UPD: 2907 case ARM::VLD4q16Pseudo_UPD: 2908 case ARM::VLD4q32Pseudo_UPD: 2909 case ARM::VLD4q8oddPseudo: 2910 case ARM::VLD4q16oddPseudo: 2911 case ARM::VLD4q32oddPseudo: 2912 case ARM::VLD4q8oddPseudo_UPD: 2913 case ARM::VLD4q16oddPseudo_UPD: 2914 case ARM::VLD4q32oddPseudo_UPD: 2915 case ARM::VLD1DUPq8: 2916 case ARM::VLD1DUPq16: 2917 case ARM::VLD1DUPq32: 2918 case ARM::VLD1DUPq8wb_fixed: 2919 case ARM::VLD1DUPq16wb_fixed: 2920 case ARM::VLD1DUPq32wb_fixed: 2921 case ARM::VLD1DUPq8wb_register: 2922 case ARM::VLD1DUPq16wb_register: 2923 case ARM::VLD1DUPq32wb_register: 2924 case ARM::VLD2DUPd8: 2925 case ARM::VLD2DUPd16: 2926 case ARM::VLD2DUPd32: 2927 case ARM::VLD2DUPd8wb_fixed: 2928 case ARM::VLD2DUPd16wb_fixed: 2929 case ARM::VLD2DUPd32wb_fixed: 2930 case ARM::VLD2DUPd8wb_register: 2931 case ARM::VLD2DUPd16wb_register: 2932 case ARM::VLD2DUPd32wb_register: 2933 case ARM::VLD4DUPd8Pseudo: 2934 case ARM::VLD4DUPd16Pseudo: 2935 case ARM::VLD4DUPd32Pseudo: 2936 case ARM::VLD4DUPd8Pseudo_UPD: 2937 case ARM::VLD4DUPd16Pseudo_UPD: 2938 case ARM::VLD4DUPd32Pseudo_UPD: 2939 case ARM::VLD1LNq8Pseudo: 2940 case ARM::VLD1LNq16Pseudo: 2941 case ARM::VLD1LNq32Pseudo: 2942 case ARM::VLD1LNq8Pseudo_UPD: 2943 case ARM::VLD1LNq16Pseudo_UPD: 2944 case ARM::VLD1LNq32Pseudo_UPD: 2945 case ARM::VLD2LNd8Pseudo: 2946 case ARM::VLD2LNd16Pseudo: 2947 case ARM::VLD2LNd32Pseudo: 2948 case ARM::VLD2LNq16Pseudo: 2949 case ARM::VLD2LNq32Pseudo: 2950 case ARM::VLD2LNd8Pseudo_UPD: 2951 case ARM::VLD2LNd16Pseudo_UPD: 2952 case 
ARM::VLD2LNd32Pseudo_UPD: 2953 case ARM::VLD2LNq16Pseudo_UPD: 2954 case ARM::VLD2LNq32Pseudo_UPD: 2955 case ARM::VLD4LNd8Pseudo: 2956 case ARM::VLD4LNd16Pseudo: 2957 case ARM::VLD4LNd32Pseudo: 2958 case ARM::VLD4LNq16Pseudo: 2959 case ARM::VLD4LNq32Pseudo: 2960 case ARM::VLD4LNd8Pseudo_UPD: 2961 case ARM::VLD4LNd16Pseudo_UPD: 2962 case ARM::VLD4LNd32Pseudo_UPD: 2963 case ARM::VLD4LNq16Pseudo_UPD: 2964 case ARM::VLD4LNq32Pseudo_UPD: 2965 // If the address is not 64-bit aligned, the latencies of these 2966 // instructions increases by one. 2967 ++Latency; 2968 break; 2969 } 2970 2971 return Latency; 2972} 2973 2974unsigned 2975ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData, 2976 const MachineInstr *DefMI, unsigned DefIdx, 2977 const MachineInstr *DepMI) const { 2978 unsigned Reg = DefMI->getOperand(DefIdx).getReg(); 2979 if (DepMI->readsRegister(Reg, &getRegisterInfo()) || !isPredicated(DepMI)) 2980 return 1; 2981 2982 // If the second MI is predicated, then there is an implicit use dependency. 2983 return getOperandLatency(ItinData, DefMI, DefIdx, DepMI, 2984 DepMI->getNumOperands()); 2985} 2986 2987unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 2988 const MachineInstr *MI, 2989 unsigned *PredCost) const { 2990 if (MI->isCopyLike() || MI->isInsertSubreg() || 2991 MI->isRegSequence() || MI->isImplicitDef()) 2992 return 1; 2993 2994 // An instruction scheduler typically runs on unbundled instructions, however 2995 // other passes may query the latency of a bundled instruction. 2996 if (MI->isBundle()) { 2997 unsigned Latency = 0; 2998 MachineBasicBlock::const_instr_iterator I = MI; 2999 MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end(); 3000 while (++I != E && I->isInsideBundle()) { 3001 if (I->getOpcode() != ARM::t2IT) 3002 Latency += getInstrLatency(ItinData, I, PredCost); 3003 } 3004 return Latency; 3005 } 3006 3007 const MCInstrDesc &MCID = MI->getDesc(); 3008 if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR))) { 3009 // When predicated, CPSR is an additional source operand for CPSR updating 3010 // instructions, this apparently increases their latencies. 3011 *PredCost = 1; 3012 } 3013 // Be sure to call getStageLatency for an empty itinerary in case it has a 3014 // valid MinLatency property. 3015 if (!ItinData) 3016 return MI->mayLoad() ? 3 : 1; 3017 3018 unsigned Class = MCID.getSchedClass(); 3019 3020 // For instructions with variable uops, use uops as latency. 3021 if (!ItinData->isEmpty() && !ItinData->Itineraries[Class].NumMicroOps) { 3022 return getNumMicroOps(ItinData, MI); 3023 } 3024 // For the common case, fall back on the itinerary's latency. 
3025 return ItinData->getStageLatency(Class); 3026} 3027 3028int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 3029 SDNode *Node) const { 3030 if (!Node->isMachineOpcode()) 3031 return 1; 3032 3033 if (!ItinData || ItinData->isEmpty()) 3034 return 1; 3035 3036 unsigned Opcode = Node->getMachineOpcode(); 3037 switch (Opcode) { 3038 default: 3039 return ItinData->getStageLatency(get(Opcode).getSchedClass()); 3040 case ARM::VLDMQIA: 3041 case ARM::VSTMQIA: 3042 return 2; 3043 } 3044} 3045 3046bool ARMBaseInstrInfo:: 3047hasHighOperandLatency(const InstrItineraryData *ItinData, 3048 const MachineRegisterInfo *MRI, 3049 const MachineInstr *DefMI, unsigned DefIdx, 3050 const MachineInstr *UseMI, unsigned UseIdx) const { 3051 unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask; 3052 unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask; 3053 if (Subtarget.isCortexA8() && 3054 (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP)) 3055 // CortexA8 VFP instructions are not pipelined. 3056 return true; 3057 3058 // Hoist VFP / NEON instructions with 4 or higher latency. 3059 int Latency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx); 3060 if (Latency <= 3) 3061 return false; 3062 return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON || 3063 UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON; 3064} 3065 3066bool ARMBaseInstrInfo:: 3067hasLowDefLatency(const InstrItineraryData *ItinData, 3068 const MachineInstr *DefMI, unsigned DefIdx) const { 3069 if (!ItinData || ItinData->isEmpty()) 3070 return false; 3071 3072 unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask; 3073 if (DDomain == ARMII::DomainGeneral) { 3074 unsigned DefClass = DefMI->getDesc().getSchedClass(); 3075 int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); 3076 return (DefCycle != -1 && DefCycle <= 2); 3077 } 3078 return false; 3079} 3080 3081bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI, 3082 StringRef &ErrInfo) const { 3083 if (convertAddSubFlagsOpcode(MI->getOpcode())) { 3084 ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG"; 3085 return false; 3086 } 3087 return true; 3088} 3089 3090bool 3091ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc, 3092 unsigned &AddSubOpc, 3093 bool &NegAcc, bool &HasLane) const { 3094 DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode); 3095 if (I == MLxEntryMap.end()) 3096 return false; 3097 3098 const ARM_MLxEntry &Entry = ARM_MLxTable[I->second]; 3099 MulOpc = Entry.MulOpc; 3100 AddSubOpc = Entry.AddSubOpc; 3101 NegAcc = Entry.NegAcc; 3102 HasLane = Entry.HasLane; 3103 return true; 3104} 3105 3106//===----------------------------------------------------------------------===// 3107// Execution domains. 3108//===----------------------------------------------------------------------===// 3109// 3110// Some instructions go down the NEON pipeline, some go down the VFP pipeline, 3111// and some can go down both. The vmov instructions go down the VFP pipeline, 3112// but they can be changed to vorr equivalents that are executed by the NEON 3113// pipeline. 
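// For example, the double-precision move
//   vmov.f64 d0, d1        (VFP)
// can be rewritten as
//   vorr     d0, d1, d1    (NEON)
// with the same result, which is what setExecutionDomain() below does for an
// unpredicated VMOVD.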
3114// 3115// We use the following execution domain numbering: 3116// 3117enum ARMExeDomain { 3118 ExeGeneric = 0, 3119 ExeVFP = 1, 3120 ExeNEON = 2 3121}; 3122// 3123// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h 3124// 3125std::pair<uint16_t, uint16_t> 3126ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const { 3127 // VMOVD is a VFP instruction, but can be changed to NEON if it isn't 3128 // predicated. 3129 if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI)) 3130 return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON)); 3131 3132 // No other instructions can be swizzled, so just determine their domain. 3133 unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask; 3134 3135 if (Domain & ARMII::DomainNEON) 3136 return std::make_pair(ExeNEON, 0); 3137 3138 // Certain instructions can go either way on Cortex-A8. 3139 // Treat them as NEON instructions. 3140 if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8()) 3141 return std::make_pair(ExeNEON, 0); 3142 3143 if (Domain & ARMII::DomainVFP) 3144 return std::make_pair(ExeVFP, 0); 3145 3146 return std::make_pair(ExeGeneric, 0); 3147} 3148 3149void 3150ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const { 3151 // We only know how to change VMOVD into VORR. 3152 assert(MI->getOpcode() == ARM::VMOVD && "Can only swizzle VMOVD"); 3153 if (Domain != ExeNEON) 3154 return; 3155 3156 // Zap the predicate operands. 3157 assert(!isPredicated(MI) && "Cannot predicate a VORRd"); 3158 MI->RemoveOperand(3); 3159 MI->RemoveOperand(2); 3160 3161 // Change to a VORRd which requires two identical use operands. 3162 MI->setDesc(get(ARM::VORRd)); 3163 3164 // Add the extra source operand and new predicates. 3165 // This will go before any implicit ops. 3166 AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1))); 3167} 3168 3169bool ARMBaseInstrInfo::hasNOP() const { 3170 return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0; 3171} 3172
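// Usage sketch (illustrative): the execution-domain hooks above are driven by
// the generic domain-fixing pass, roughly as follows:
//
//   std::pair<uint16_t, uint16_t> DP = TII->getExecutionDomain(MI);
//   if (DP.second & (1u << ExeNEON))    // a NEON alternative is available
//     TII->setExecutionDomain(MI, ExeNEON);
//
// where the first element of the pair is the current domain and the second is
// a bit mask of the domains the instruction could be switched to.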