ARMFastISel.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
1//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the ARM-specific support for the FastISel class. Some 11// of the target-specific code is generated by tablegen in the file 12// ARMGenFastISel.inc, which is #included here. 13// 14//===----------------------------------------------------------------------===// 15 16#include "ARM.h" 17#include "ARMBaseRegisterInfo.h" 18#include "ARMCallingConv.h" 19#include "ARMConstantPoolValue.h" 20#include "ARMISelLowering.h" 21#include "ARMMachineFunctionInfo.h" 22#include "ARMSubtarget.h" 23#include "MCTargetDesc/ARMAddressingModes.h" 24#include "llvm/ADT/STLExtras.h" 25#include "llvm/CodeGen/Analysis.h" 26#include "llvm/CodeGen/FastISel.h" 27#include "llvm/CodeGen/FunctionLoweringInfo.h" 28#include "llvm/CodeGen/MachineConstantPool.h" 29#include "llvm/CodeGen/MachineFrameInfo.h" 30#include "llvm/CodeGen/MachineInstrBuilder.h" 31#include "llvm/CodeGen/MachineMemOperand.h" 32#include "llvm/CodeGen/MachineModuleInfo.h" 33#include "llvm/CodeGen/MachineRegisterInfo.h" 34#include "llvm/IR/CallSite.h" 35#include "llvm/IR/CallingConv.h" 36#include "llvm/IR/DataLayout.h" 37#include "llvm/IR/DerivedTypes.h" 38#include "llvm/IR/GetElementPtrTypeIterator.h" 39#include "llvm/IR/GlobalVariable.h" 40#include "llvm/IR/Instructions.h" 41#include "llvm/IR/IntrinsicInst.h" 42#include "llvm/IR/Module.h" 43#include "llvm/IR/Operator.h" 44#include "llvm/Support/CommandLine.h" 45#include "llvm/Support/ErrorHandling.h" 46#include "llvm/Target/TargetInstrInfo.h" 47#include "llvm/Target/TargetLowering.h" 48#include "llvm/Target/TargetMachine.h" 49#include "llvm/Target/TargetOptions.h" 50using namespace llvm; 51 52extern cl::opt<bool> EnableARMLongCalls; 53 54namespace { 55 56 // All possible address modes, plus some. 57 typedef struct Address { 58 enum { 59 RegBase, 60 FrameIndexBase 61 } BaseType; 62 63 union { 64 unsigned Reg; 65 int FI; 66 } Base; 67 68 int Offset; 69 70 // Innocuous defaults for our address. 71 Address() 72 : BaseType(RegBase), Offset(0) { 73 Base.Reg = 0; 74 } 75 } Address; 76 77class ARMFastISel final : public FastISel { 78 79 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can 80 /// make the right decision when generating code for different targets. 81 const ARMSubtarget *Subtarget; 82 Module &M; 83 const TargetMachine &TM; 84 const TargetInstrInfo &TII; 85 const TargetLowering &TLI; 86 ARMFunctionInfo *AFI; 87 88 // Convenience variables to avoid some queries. 89 bool isThumb2; 90 LLVMContext *Context; 91 92 public: 93 explicit ARMFastISel(FunctionLoweringInfo &funcInfo, 94 const TargetLibraryInfo *libInfo) 95 : FastISel(funcInfo, libInfo), 96 M(const_cast<Module&>(*funcInfo.Fn->getParent())), 97 TM(funcInfo.MF->getTarget()), 98 TII(*TM.getInstrInfo()), 99 TLI(*TM.getTargetLowering()) { 100 Subtarget = &TM.getSubtarget<ARMSubtarget>(); 101 AFI = funcInfo.MF->getInfo<ARMFunctionInfo>(); 102 isThumb2 = AFI->isThumbFunction(); 103 Context = &funcInfo.Fn->getContext(); 104 } 105 106 // Code from FastISel.cpp. 
107 private: 108 unsigned FastEmitInst_r(unsigned MachineInstOpcode, 109 const TargetRegisterClass *RC, 110 unsigned Op0, bool Op0IsKill); 111 unsigned FastEmitInst_rr(unsigned MachineInstOpcode, 112 const TargetRegisterClass *RC, 113 unsigned Op0, bool Op0IsKill, 114 unsigned Op1, bool Op1IsKill); 115 unsigned FastEmitInst_rrr(unsigned MachineInstOpcode, 116 const TargetRegisterClass *RC, 117 unsigned Op0, bool Op0IsKill, 118 unsigned Op1, bool Op1IsKill, 119 unsigned Op2, bool Op2IsKill); 120 unsigned FastEmitInst_ri(unsigned MachineInstOpcode, 121 const TargetRegisterClass *RC, 122 unsigned Op0, bool Op0IsKill, 123 uint64_t Imm); 124 unsigned FastEmitInst_rri(unsigned MachineInstOpcode, 125 const TargetRegisterClass *RC, 126 unsigned Op0, bool Op0IsKill, 127 unsigned Op1, bool Op1IsKill, 128 uint64_t Imm); 129 unsigned FastEmitInst_i(unsigned MachineInstOpcode, 130 const TargetRegisterClass *RC, 131 uint64_t Imm); 132 133 // Backend specific FastISel code. 134 private: 135 bool TargetSelectInstruction(const Instruction *I) override; 136 unsigned TargetMaterializeConstant(const Constant *C) override; 137 unsigned TargetMaterializeAlloca(const AllocaInst *AI) override; 138 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, 139 const LoadInst *LI) override; 140 bool FastLowerArguments() override; 141 private: 142 #include "ARMGenFastISel.inc" 143 144 // Instruction selection routines. 145 private: 146 bool SelectLoad(const Instruction *I); 147 bool SelectStore(const Instruction *I); 148 bool SelectBranch(const Instruction *I); 149 bool SelectIndirectBr(const Instruction *I); 150 bool SelectCmp(const Instruction *I); 151 bool SelectFPExt(const Instruction *I); 152 bool SelectFPTrunc(const Instruction *I); 153 bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode); 154 bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode); 155 bool SelectIToFP(const Instruction *I, bool isSigned); 156 bool SelectFPToI(const Instruction *I, bool isSigned); 157 bool SelectDiv(const Instruction *I, bool isSigned); 158 bool SelectRem(const Instruction *I, bool isSigned); 159 bool SelectCall(const Instruction *I, const char *IntrMemName); 160 bool SelectIntrinsicCall(const IntrinsicInst &I); 161 bool SelectSelect(const Instruction *I); 162 bool SelectRet(const Instruction *I); 163 bool SelectTrunc(const Instruction *I); 164 bool SelectIntExt(const Instruction *I); 165 bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy); 166 167 // Utility routines. 
168 private: 169 unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned OpNum, 170 unsigned Op); 171 bool isTypeLegal(Type *Ty, MVT &VT); 172 bool isLoadTypeLegal(Type *Ty, MVT &VT); 173 bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, 174 bool isZExt); 175 bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, 176 unsigned Alignment = 0, bool isZExt = true, 177 bool allocReg = true); 178 bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr, 179 unsigned Alignment = 0); 180 bool ARMComputeAddress(const Value *Obj, Address &Addr); 181 void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3); 182 bool ARMIsMemCpySmall(uint64_t Len); 183 bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len, 184 unsigned Alignment); 185 unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt); 186 unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT); 187 unsigned ARMMaterializeInt(const Constant *C, MVT VT); 188 unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT); 189 unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg); 190 unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg); 191 unsigned ARMSelectCallOp(bool UseReg); 192 unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT); 193 194 // Call handling routines. 195 private: 196 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, 197 bool Return, 198 bool isVarArg); 199 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args, 200 SmallVectorImpl<unsigned> &ArgRegs, 201 SmallVectorImpl<MVT> &ArgVTs, 202 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 203 SmallVectorImpl<unsigned> &RegArgs, 204 CallingConv::ID CC, 205 unsigned &NumBytes, 206 bool isVarArg); 207 unsigned getLibcallReg(const Twine &Name); 208 bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 209 const Instruction *I, CallingConv::ID CC, 210 unsigned &NumBytes, bool isVarArg); 211 bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call); 212 213 // OptionalDef handling routines. 214 private: 215 bool isARMNEONPred(const MachineInstr *MI); 216 bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR); 217 const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB); 218 void AddLoadStoreOperands(MVT VT, Address &Addr, 219 const MachineInstrBuilder &MIB, 220 unsigned Flags, bool useAM3); 221}; 222 223} // end anonymous namespace 224 225#include "ARMGenCallingConv.inc" 226 227// DefinesOptionalPredicate - This is different from DefinesPredicate in that 228// we don't care about implicit defs here, just places we'll need to add a 229// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR. 230bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) { 231 if (!MI->hasOptionalDef()) 232 return false; 233 234 // Look to see if our OptionalDef is defining CPSR or CCR. 235 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 236 const MachineOperand &MO = MI->getOperand(i); 237 if (!MO.isReg() || !MO.isDef()) continue; 238 if (MO.getReg() == ARM::CPSR) 239 *CPSR = true; 240 } 241 return true; 242} 243 244bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) { 245 const MCInstrDesc &MCID = MI->getDesc(); 246 247 // If we're a thumb2 or not NEON function we'll be handled via isPredicable. 
248 if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON || 249 AFI->isThumb2Function()) 250 return MI->isPredicable(); 251 252 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) 253 if (MCID.OpInfo[i].isPredicate()) 254 return true; 255 256 return false; 257} 258 259// If the machine is predicable go ahead and add the predicate operands, if 260// it needs default CC operands add those. 261// TODO: If we want to support thumb1 then we'll need to deal with optional 262// CPSR defs that need to be added before the remaining operands. See s_cc_out 263// for descriptions why. 264const MachineInstrBuilder & 265ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { 266 MachineInstr *MI = &*MIB; 267 268 // Do we use a predicate? or... 269 // Are we NEON in ARM mode and have a predicate operand? If so, I know 270 // we're not predicable but add it anyways. 271 if (isARMNEONPred(MI)) 272 AddDefaultPred(MIB); 273 274 // Do we optionally set a predicate? Preds is size > 0 iff the predicate 275 // defines CPSR. All other OptionalDefines in ARM are the CCR register. 276 bool CPSR = false; 277 if (DefinesOptionalPredicate(MI, &CPSR)) { 278 if (CPSR) 279 AddDefaultT1CC(MIB); 280 else 281 AddDefaultCC(MIB); 282 } 283 return MIB; 284} 285 286unsigned ARMFastISel::constrainOperandRegClass(const MCInstrDesc &II, 287 unsigned Op, unsigned OpNum) { 288 if (TargetRegisterInfo::isVirtualRegister(Op)) { 289 const TargetRegisterClass *RegClass = 290 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF); 291 if (!MRI.constrainRegClass(Op, RegClass)) { 292 // If it's not legal to COPY between the register classes, something 293 // has gone very wrong before we got here. 294 unsigned NewOp = createResultReg(RegClass); 295 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 296 TII.get(TargetOpcode::COPY), NewOp).addReg(Op)); 297 return NewOp; 298 } 299 } 300 return Op; 301} 302 303unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode, 304 const TargetRegisterClass *RC, 305 unsigned Op0, bool Op0IsKill) { 306 unsigned ResultReg = createResultReg(RC); 307 const MCInstrDesc &II = TII.get(MachineInstOpcode); 308 309 // Make sure the input operand is sufficiently constrained to be legal 310 // for this instruction. 311 Op0 = constrainOperandRegClass(II, Op0, 1); 312 if (II.getNumDefs() >= 1) { 313 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, 314 ResultReg).addReg(Op0, Op0IsKill * RegState::Kill)); 315 } else { 316 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 317 .addReg(Op0, Op0IsKill * RegState::Kill)); 318 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 319 TII.get(TargetOpcode::COPY), ResultReg) 320 .addReg(II.ImplicitDefs[0])); 321 } 322 return ResultReg; 323} 324 325unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode, 326 const TargetRegisterClass *RC, 327 unsigned Op0, bool Op0IsKill, 328 unsigned Op1, bool Op1IsKill) { 329 unsigned ResultReg = createResultReg(RC); 330 const MCInstrDesc &II = TII.get(MachineInstOpcode); 331 332 // Make sure the input operands are sufficiently constrained to be legal 333 // for this instruction. 
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);
  Op2 = constrainOperandRegClass(II, Op2, 3);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill)
            .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
421 Op0 = constrainOperandRegClass(II, Op0, 1); 422 Op1 = constrainOperandRegClass(II, Op1, 2); 423 if (II.getNumDefs() >= 1) { 424 AddOptionalDefs( 425 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 426 .addReg(Op0, Op0IsKill * RegState::Kill) 427 .addReg(Op1, Op1IsKill * RegState::Kill) 428 .addImm(Imm)); 429 } else { 430 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 431 .addReg(Op0, Op0IsKill * RegState::Kill) 432 .addReg(Op1, Op1IsKill * RegState::Kill) 433 .addImm(Imm)); 434 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 435 TII.get(TargetOpcode::COPY), ResultReg) 436 .addReg(II.ImplicitDefs[0])); 437 } 438 return ResultReg; 439} 440 441unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode, 442 const TargetRegisterClass *RC, 443 uint64_t Imm) { 444 unsigned ResultReg = createResultReg(RC); 445 const MCInstrDesc &II = TII.get(MachineInstOpcode); 446 447 if (II.getNumDefs() >= 1) { 448 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, 449 ResultReg).addImm(Imm)); 450 } else { 451 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 452 .addImm(Imm)); 453 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 454 TII.get(TargetOpcode::COPY), ResultReg) 455 .addReg(II.ImplicitDefs[0])); 456 } 457 return ResultReg; 458} 459 460// TODO: Don't worry about 64-bit now, but when this is fixed remove the 461// checks from the various callers. 462unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) { 463 if (VT == MVT::f64) return 0; 464 465 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 466 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 467 TII.get(ARM::VMOVSR), MoveReg) 468 .addReg(SrcReg)); 469 return MoveReg; 470} 471 472unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) { 473 if (VT == MVT::i64) return 0; 474 475 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 476 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 477 TII.get(ARM::VMOVRS), MoveReg) 478 .addReg(SrcReg)); 479 return MoveReg; 480} 481 482// For double width floating point we need to materialize two constants 483// (the high and the low) into integer registers then use a move to get 484// the combined constant into an FP reg. 485unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) { 486 const APFloat Val = CFP->getValueAPF(); 487 bool is64bit = VT == MVT::f64; 488 489 // This checks to see if we can use VFP3 instructions to materialize 490 // a constant, otherwise we have to go through the constant pool. 491 if (TLI.isFPImmLegal(Val, VT)) { 492 int Imm; 493 unsigned Opc; 494 if (is64bit) { 495 Imm = ARM_AM::getFP64Imm(Val); 496 Opc = ARM::FCONSTD; 497 } else { 498 Imm = ARM_AM::getFP32Imm(Val); 499 Opc = ARM::FCONSTS; 500 } 501 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 502 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 503 TII.get(Opc), DestReg).addImm(Imm)); 504 return DestReg; 505 } 506 507 // Require VFP2 for loading fp constants. 508 if (!Subtarget->hasVFP2()) return false; 509 510 // MachineConstantPool wants an explicit alignment. 511 unsigned Align = DL.getPrefTypeAlignment(CFP->getType()); 512 if (Align == 0) { 513 // TODO: Figure out if this is correct. 514 Align = DL.getTypeAllocSize(CFP->getType()); 515 } 516 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align); 517 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 518 unsigned Opc = is64bit ? 
ARM::VLDRD : ARM::VLDRS; 519 520 // The extra reg is for addrmode5. 521 AddOptionalDefs( 522 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) 523 .addConstantPoolIndex(Idx) 524 .addReg(0)); 525 return DestReg; 526} 527 528unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) { 529 530 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1) 531 return false; 532 533 // If we can do this in a single instruction without a constant pool entry 534 // do so now. 535 const ConstantInt *CI = cast<ConstantInt>(C); 536 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) { 537 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16; 538 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass : 539 &ARM::GPRRegClass; 540 unsigned ImmReg = createResultReg(RC); 541 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 542 TII.get(Opc), ImmReg) 543 .addImm(CI->getZExtValue())); 544 return ImmReg; 545 } 546 547 // Use MVN to emit negative constants. 548 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) { 549 unsigned Imm = (unsigned)~(CI->getSExtValue()); 550 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 551 (ARM_AM::getSOImmVal(Imm) != -1); 552 if (UseImm) { 553 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi; 554 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 555 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 556 TII.get(Opc), ImmReg) 557 .addImm(Imm)); 558 return ImmReg; 559 } 560 } 561 562 // Load from constant pool. For now 32-bit only. 563 if (VT != MVT::i32) 564 return false; 565 566 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 567 568 // MachineConstantPool wants an explicit alignment. 569 unsigned Align = DL.getPrefTypeAlignment(C->getType()); 570 if (Align == 0) { 571 // TODO: Figure out if this is correct. 572 Align = DL.getTypeAllocSize(C->getType()); 573 } 574 unsigned Idx = MCP.getConstantPoolIndex(C, Align); 575 576 if (isThumb2) 577 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 578 TII.get(ARM::t2LDRpci), DestReg) 579 .addConstantPoolIndex(Idx)); 580 else { 581 // The extra immediate is for addrmode2. 582 DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0); 583 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 584 TII.get(ARM::LDRcp), DestReg) 585 .addConstantPoolIndex(Idx) 586 .addImm(0)); 587 } 588 589 return DestReg; 590} 591 592unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) { 593 // For now 32-bit only. 594 if (VT != MVT::i32) return 0; 595 596 Reloc::Model RelocM = TM.getRelocationModel(); 597 bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM); 598 const TargetRegisterClass *RC = isThumb2 ? 599 (const TargetRegisterClass*)&ARM::rGPRRegClass : 600 (const TargetRegisterClass*)&ARM::GPRRegClass; 601 unsigned DestReg = createResultReg(RC); 602 603 // FastISel TLS support on non-MachO is broken, punt to SelectionDAG. 604 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); 605 bool IsThreadLocal = GVar && GVar->isThreadLocal(); 606 if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0; 607 608 // Use movw+movt when possible, it avoids constant pool entries. 609 // Non-darwin targets only support static movt relocations in FastISel. 
610 if (Subtarget->useMovt() && 611 (Subtarget->isTargetMachO() || RelocM == Reloc::Static)) { 612 unsigned Opc; 613 unsigned char TF = 0; 614 if (Subtarget->isTargetMachO()) 615 TF = ARMII::MO_NONLAZY; 616 617 switch (RelocM) { 618 case Reloc::PIC_: 619 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel; 620 break; 621 default: 622 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm; 623 break; 624 } 625 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 626 TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF)); 627 } else { 628 // MachineConstantPool wants an explicit alignment. 629 unsigned Align = DL.getPrefTypeAlignment(GV->getType()); 630 if (Align == 0) { 631 // TODO: Figure out if this is correct. 632 Align = DL.getTypeAllocSize(GV->getType()); 633 } 634 635 if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_) 636 return ARMLowerPICELF(GV, Align, VT); 637 638 // Grab index. 639 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : 640 (Subtarget->isThumb() ? 4 : 8); 641 unsigned Id = AFI->createPICLabelUId(); 642 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id, 643 ARMCP::CPValue, 644 PCAdj); 645 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 646 647 // Load value. 648 MachineInstrBuilder MIB; 649 if (isThumb2) { 650 unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic; 651 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), 652 DestReg).addConstantPoolIndex(Idx); 653 if (RelocM == Reloc::PIC_) 654 MIB.addImm(Id); 655 AddOptionalDefs(MIB); 656 } else { 657 // The extra immediate is for addrmode2. 658 DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0); 659 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 660 TII.get(ARM::LDRcp), DestReg) 661 .addConstantPoolIndex(Idx) 662 .addImm(0); 663 AddOptionalDefs(MIB); 664 665 if (RelocM == Reloc::PIC_) { 666 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD; 667 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 668 669 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 670 DbgLoc, TII.get(Opc), NewDestReg) 671 .addReg(DestReg) 672 .addImm(Id); 673 AddOptionalDefs(MIB); 674 return NewDestReg; 675 } 676 } 677 } 678 679 if (IsIndirect) { 680 MachineInstrBuilder MIB; 681 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 682 if (isThumb2) 683 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 684 TII.get(ARM::t2LDRi12), NewDestReg) 685 .addReg(DestReg) 686 .addImm(0); 687 else 688 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 689 TII.get(ARM::LDRi12), NewDestReg) 690 .addReg(DestReg) 691 .addImm(0); 692 DestReg = NewDestReg; 693 AddOptionalDefs(MIB); 694 } 695 696 return DestReg; 697} 698 699unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { 700 EVT CEVT = TLI.getValueType(C->getType(), true); 701 702 // Only handle simple types. 703 if (!CEVT.isSimple()) return 0; 704 MVT VT = CEVT.getSimpleVT(); 705 706 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) 707 return ARMMaterializeFP(CFP, VT); 708 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) 709 return ARMMaterializeGV(GV, VT); 710 else if (isa<ConstantInt>(C)) 711 return ARMMaterializeInt(C, VT); 712 713 return 0; 714} 715 716// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF); 717 718unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { 719 // Don't handle dynamic allocas. 
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
815 gep_type_iterator GTI = gep_type_begin(U); 816 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); 817 i != e; ++i, ++GTI) { 818 const Value *Op = *i; 819 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 820 const StructLayout *SL = DL.getStructLayout(STy); 821 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue(); 822 TmpOffset += SL->getElementOffset(Idx); 823 } else { 824 uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType()); 825 for (;;) { 826 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { 827 // Constant-offset addressing. 828 TmpOffset += CI->getSExtValue() * S; 829 break; 830 } 831 if (canFoldAddIntoGEP(U, Op)) { 832 // A compatible add with a constant operand. Fold the constant. 833 ConstantInt *CI = 834 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); 835 TmpOffset += CI->getSExtValue() * S; 836 // Iterate on the other operand. 837 Op = cast<AddOperator>(Op)->getOperand(0); 838 continue; 839 } 840 // Unsupported 841 goto unsupported_gep; 842 } 843 } 844 } 845 846 // Try to grab the base operand now. 847 Addr.Offset = TmpOffset; 848 if (ARMComputeAddress(U->getOperand(0), Addr)) return true; 849 850 // We failed, restore everything and try the other options. 851 Addr = SavedAddr; 852 853 unsupported_gep: 854 break; 855 } 856 case Instruction::Alloca: { 857 const AllocaInst *AI = cast<AllocaInst>(Obj); 858 DenseMap<const AllocaInst*, int>::iterator SI = 859 FuncInfo.StaticAllocaMap.find(AI); 860 if (SI != FuncInfo.StaticAllocaMap.end()) { 861 Addr.BaseType = Address::FrameIndexBase; 862 Addr.Base.FI = SI->second; 863 return true; 864 } 865 break; 866 } 867 } 868 869 // Try to get this in a register if nothing else has worked. 870 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj); 871 return Addr.Base.Reg != 0; 872} 873 874void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) { 875 bool needsLowering = false; 876 switch (VT.SimpleTy) { 877 default: llvm_unreachable("Unhandled load/store type!"); 878 case MVT::i1: 879 case MVT::i8: 880 case MVT::i16: 881 case MVT::i32: 882 if (!useAM3) { 883 // Integer loads/stores handle 12-bit offsets. 884 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); 885 // Handle negative offsets. 886 if (needsLowering && isThumb2) 887 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 && 888 Addr.Offset > -256); 889 } else { 890 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets. 891 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255); 892 } 893 break; 894 case MVT::f32: 895 case MVT::f64: 896 // Floating point operands handle 8-bit offsets. 897 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset); 898 break; 899 } 900 901 // If this is a stack pointer and the offset needs to be simplified then 902 // put the alloca address into a register, set the base type back to 903 // register and continue. This should almost never happen. 904 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { 905 const TargetRegisterClass *RC = isThumb2 ? 906 (const TargetRegisterClass*)&ARM::tGPRRegClass : 907 (const TargetRegisterClass*)&ARM::GPRRegClass; 908 unsigned ResultReg = createResultReg(RC); 909 unsigned Opc = isThumb2 ? 
ARM::t2ADDri : ARM::ADDri; 910 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 911 TII.get(Opc), ResultReg) 912 .addFrameIndex(Addr.Base.FI) 913 .addImm(0)); 914 Addr.Base.Reg = ResultReg; 915 Addr.BaseType = Address::RegBase; 916 } 917 918 // Since the offset is too large for the load/store instruction 919 // get the reg+offset into a register. 920 if (needsLowering) { 921 Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, 922 /*Op0IsKill*/false, Addr.Offset, MVT::i32); 923 Addr.Offset = 0; 924 } 925} 926 927void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr, 928 const MachineInstrBuilder &MIB, 929 unsigned Flags, bool useAM3) { 930 // addrmode5 output depends on the selection dag addressing dividing the 931 // offset by 4 that it then later multiplies. Do this here as well. 932 if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64) 933 Addr.Offset /= 4; 934 935 // Frame base works a bit differently. Handle it separately. 936 if (Addr.BaseType == Address::FrameIndexBase) { 937 int FI = Addr.Base.FI; 938 int Offset = Addr.Offset; 939 MachineMemOperand *MMO = 940 FuncInfo.MF->getMachineMemOperand( 941 MachinePointerInfo::getFixedStack(FI, Offset), 942 Flags, 943 MFI.getObjectSize(FI), 944 MFI.getObjectAlignment(FI)); 945 // Now add the rest of the operands. 946 MIB.addFrameIndex(FI); 947 948 // ARM halfword load/stores and signed byte loads need an additional 949 // operand. 950 if (useAM3) { 951 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 952 MIB.addReg(0); 953 MIB.addImm(Imm); 954 } else { 955 MIB.addImm(Addr.Offset); 956 } 957 MIB.addMemOperand(MMO); 958 } else { 959 // Now add the rest of the operands. 960 MIB.addReg(Addr.Base.Reg); 961 962 // ARM halfword load/stores and signed byte loads need an additional 963 // operand. 964 if (useAM3) { 965 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 966 MIB.addReg(0); 967 MIB.addImm(Imm); 968 } else { 969 MIB.addImm(Addr.Offset); 970 } 971 } 972 AddOptionalDefs(MIB); 973} 974 975bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, 976 unsigned Alignment, bool isZExt, bool allocReg) { 977 unsigned Opc; 978 bool useAM3 = false; 979 bool needVMOV = false; 980 const TargetRegisterClass *RC; 981 switch (VT.SimpleTy) { 982 // This is mostly going to be Neon/vector support. 983 default: return false; 984 case MVT::i1: 985 case MVT::i8: 986 if (isThumb2) { 987 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 988 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8; 989 else 990 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12; 991 } else { 992 if (isZExt) { 993 Opc = ARM::LDRBi12; 994 } else { 995 Opc = ARM::LDRSB; 996 useAM3 = true; 997 } 998 } 999 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; 1000 break; 1001 case MVT::i16: 1002 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) 1003 return false; 1004 1005 if (isThumb2) { 1006 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1007 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8; 1008 else 1009 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12; 1010 } else { 1011 Opc = isZExt ? ARM::LDRH : ARM::LDRSH; 1012 useAM3 = true; 1013 } 1014 RC = isThumb2 ? 
&ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::i32:
    if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
      return false;

    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = ARM::t2LDRi8;
      else
        Opc = ARM::t2LDRi12;
    } else {
      Opc = ARM::LDRi12;
    }
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    // Unaligned loads need special handling. Floats require word-alignment.
    if (Alignment && Alignment < 4) {
      needVMOV = true;
      VT = MVT::i32;
      Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    } else {
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    // FIXME: Unaligned loads need special handling. Doublewords require
    // word-alignment.
    if (Alignment && Alignment < 4)
      return false;

    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert (ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass);
    unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
    SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), Res)
                    .addReg(SrcReg).addImm(1));
    SrcReg = Res;
  } // Fallthrough here.
1116 case MVT::i8: 1117 if (isThumb2) { 1118 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1119 StrOpc = ARM::t2STRBi8; 1120 else 1121 StrOpc = ARM::t2STRBi12; 1122 } else { 1123 StrOpc = ARM::STRBi12; 1124 } 1125 break; 1126 case MVT::i16: 1127 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) 1128 return false; 1129 1130 if (isThumb2) { 1131 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1132 StrOpc = ARM::t2STRHi8; 1133 else 1134 StrOpc = ARM::t2STRHi12; 1135 } else { 1136 StrOpc = ARM::STRH; 1137 useAM3 = true; 1138 } 1139 break; 1140 case MVT::i32: 1141 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) 1142 return false; 1143 1144 if (isThumb2) { 1145 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1146 StrOpc = ARM::t2STRi8; 1147 else 1148 StrOpc = ARM::t2STRi12; 1149 } else { 1150 StrOpc = ARM::STRi12; 1151 } 1152 break; 1153 case MVT::f32: 1154 if (!Subtarget->hasVFP2()) return false; 1155 // Unaligned stores need special handling. Floats require word-alignment. 1156 if (Alignment && Alignment < 4) { 1157 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 1158 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1159 TII.get(ARM::VMOVRS), MoveReg) 1160 .addReg(SrcReg)); 1161 SrcReg = MoveReg; 1162 VT = MVT::i32; 1163 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12; 1164 } else { 1165 StrOpc = ARM::VSTRS; 1166 } 1167 break; 1168 case MVT::f64: 1169 if (!Subtarget->hasVFP2()) return false; 1170 // FIXME: Unaligned stores need special handling. Doublewords require 1171 // word-alignment. 1172 if (Alignment && Alignment < 4) 1173 return false; 1174 1175 StrOpc = ARM::VSTRD; 1176 break; 1177 } 1178 // Simplify this down to something we can handle. 1179 ARMSimplifyAddress(Addr, VT, useAM3); 1180 1181 // Create the base instruction, then add the operands. 1182 SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0); 1183 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1184 TII.get(StrOpc)) 1185 .addReg(SrcReg); 1186 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3); 1187 return true; 1188} 1189 1190bool ARMFastISel::SelectStore(const Instruction *I) { 1191 Value *Op0 = I->getOperand(0); 1192 unsigned SrcReg = 0; 1193 1194 // Atomic stores need special handling. 1195 if (cast<StoreInst>(I)->isAtomic()) 1196 return false; 1197 1198 // Verify we have a legal type before going any further. 1199 MVT VT; 1200 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT)) 1201 return false; 1202 1203 // Get the value to be stored into a register. 1204 SrcReg = getRegForValue(Op0); 1205 if (SrcReg == 0) return false; 1206 1207 // See if we can handle this address. 1208 Address Addr; 1209 if (!ARMComputeAddress(I->getOperand(1), Addr)) 1210 return false; 1211 1212 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment())) 1213 return false; 1214 return true; 1215} 1216 1217static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) { 1218 switch (Pred) { 1219 // Needs two compares... 1220 case CmpInst::FCMP_ONE: 1221 case CmpInst::FCMP_UEQ: 1222 default: 1223 // AL is our "false" for now. The other two need more compares. 
1224 return ARMCC::AL; 1225 case CmpInst::ICMP_EQ: 1226 case CmpInst::FCMP_OEQ: 1227 return ARMCC::EQ; 1228 case CmpInst::ICMP_SGT: 1229 case CmpInst::FCMP_OGT: 1230 return ARMCC::GT; 1231 case CmpInst::ICMP_SGE: 1232 case CmpInst::FCMP_OGE: 1233 return ARMCC::GE; 1234 case CmpInst::ICMP_UGT: 1235 case CmpInst::FCMP_UGT: 1236 return ARMCC::HI; 1237 case CmpInst::FCMP_OLT: 1238 return ARMCC::MI; 1239 case CmpInst::ICMP_ULE: 1240 case CmpInst::FCMP_OLE: 1241 return ARMCC::LS; 1242 case CmpInst::FCMP_ORD: 1243 return ARMCC::VC; 1244 case CmpInst::FCMP_UNO: 1245 return ARMCC::VS; 1246 case CmpInst::FCMP_UGE: 1247 return ARMCC::PL; 1248 case CmpInst::ICMP_SLT: 1249 case CmpInst::FCMP_ULT: 1250 return ARMCC::LT; 1251 case CmpInst::ICMP_SLE: 1252 case CmpInst::FCMP_ULE: 1253 return ARMCC::LE; 1254 case CmpInst::FCMP_UNE: 1255 case CmpInst::ICMP_NE: 1256 return ARMCC::NE; 1257 case CmpInst::ICMP_UGE: 1258 return ARMCC::HS; 1259 case CmpInst::ICMP_ULT: 1260 return ARMCC::LO; 1261 } 1262} 1263 1264bool ARMFastISel::SelectBranch(const Instruction *I) { 1265 const BranchInst *BI = cast<BranchInst>(I); 1266 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; 1267 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; 1268 1269 // Simple branch support. 1270 1271 // If we can, avoid recomputing the compare - redoing it could lead to wonky 1272 // behavior. 1273 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { 1274 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) { 1275 1276 // Get the compare predicate. 1277 // Try to take advantage of fallthrough opportunities. 1278 CmpInst::Predicate Predicate = CI->getPredicate(); 1279 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1280 std::swap(TBB, FBB); 1281 Predicate = CmpInst::getInversePredicate(Predicate); 1282 } 1283 1284 ARMCC::CondCodes ARMPred = getComparePred(Predicate); 1285 1286 // We may not handle every CC for now. 1287 if (ARMPred == ARMCC::AL) return false; 1288 1289 // Emit the compare. 1290 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1291 return false; 1292 1293 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1294 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc)) 1295 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR); 1296 FastEmitBranch(FBB, DbgLoc); 1297 FuncInfo.MBB->addSuccessor(TBB); 1298 return true; 1299 } 1300 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) { 1301 MVT SourceVT; 1302 if (TI->hasOneUse() && TI->getParent() == I->getParent() && 1303 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) { 1304 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1305 unsigned OpReg = getRegForValue(TI->getOperand(0)); 1306 OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0); 1307 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1308 TII.get(TstOpc)) 1309 .addReg(OpReg).addImm(1)); 1310 1311 unsigned CCMode = ARMCC::NE; 1312 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1313 std::swap(TBB, FBB); 1314 CCMode = ARMCC::EQ; 1315 } 1316 1317 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1318 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc)) 1319 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1320 1321 FastEmitBranch(FBB, DbgLoc); 1322 FuncInfo.MBB->addSuccessor(TBB); 1323 return true; 1324 } 1325 } else if (const ConstantInt *CI = 1326 dyn_cast<ConstantInt>(BI->getCondition())) { 1327 uint64_t Imm = CI->getZExtValue(); 1328 MachineBasicBlock *Target = (Imm == 0) ? 
FBB : TBB;
    FastEmitBranch(Target, DbgLoc);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CmpReg)
          .addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
                  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DbgLoc);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc)).addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);

  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ?
(ARM_AM::getT2SOImmVal(Imm) != -1) : 1410 (ARM_AM::getSOImmVal(Imm) != -1); 1411 } 1412 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) { 1413 if (SrcVT == MVT::f32 || SrcVT == MVT::f64) 1414 if (ConstFP->isZero() && !ConstFP->isNegative()) 1415 UseImm = true; 1416 } 1417 1418 unsigned CmpOpc; 1419 bool isICmp = true; 1420 bool needsExt = false; 1421 switch (SrcVT.SimpleTy) { 1422 default: return false; 1423 // TODO: Verify compares. 1424 case MVT::f32: 1425 isICmp = false; 1426 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES; 1427 break; 1428 case MVT::f64: 1429 isICmp = false; 1430 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED; 1431 break; 1432 case MVT::i1: 1433 case MVT::i8: 1434 case MVT::i16: 1435 needsExt = true; 1436 // Intentional fall-through. 1437 case MVT::i32: 1438 if (isThumb2) { 1439 if (!UseImm) 1440 CmpOpc = ARM::t2CMPrr; 1441 else 1442 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri; 1443 } else { 1444 if (!UseImm) 1445 CmpOpc = ARM::CMPrr; 1446 else 1447 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri; 1448 } 1449 break; 1450 } 1451 1452 unsigned SrcReg1 = getRegForValue(Src1Value); 1453 if (SrcReg1 == 0) return false; 1454 1455 unsigned SrcReg2 = 0; 1456 if (!UseImm) { 1457 SrcReg2 = getRegForValue(Src2Value); 1458 if (SrcReg2 == 0) return false; 1459 } 1460 1461 // We have i1, i8, or i16, we need to either zero extend or sign extend. 1462 if (needsExt) { 1463 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); 1464 if (SrcReg1 == 0) return false; 1465 if (!UseImm) { 1466 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); 1467 if (SrcReg2 == 0) return false; 1468 } 1469 } 1470 1471 const MCInstrDesc &II = TII.get(CmpOpc); 1472 SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0); 1473 if (!UseImm) { 1474 SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1); 1475 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1476 .addReg(SrcReg1).addReg(SrcReg2)); 1477 } else { 1478 MachineInstrBuilder MIB; 1479 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 1480 .addReg(SrcReg1); 1481 1482 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. 1483 if (isICmp) 1484 MIB.addImm(Imm); 1485 AddOptionalDefs(MIB); 1486 } 1487 1488 // For floating point we need to move the result to a comparison register 1489 // that we can then use for branches. 1490 if (Ty->isFloatTy() || Ty->isDoubleTy()) 1491 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1492 TII.get(ARM::FMSTAT))); 1493 return true; 1494} 1495 1496bool ARMFastISel::SelectCmp(const Instruction *I) { 1497 const CmpInst *CI = cast<CmpInst>(I); 1498 1499 // Get the compare predicate. 1500 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); 1501 1502 // We may not handle every CC for now. 1503 if (ARMPred == ARMCC::AL) return false; 1504 1505 // Emit the compare. 1506 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1507 return false; 1508 1509 // Now set a register based on the comparison. Explicitly set the predicates 1510 // here. 1511 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1512 const TargetRegisterClass *RC = isThumb2 ? 
1513 (const TargetRegisterClass*)&ARM::rGPRRegClass : 1514 (const TargetRegisterClass*)&ARM::GPRRegClass; 1515 unsigned DestReg = createResultReg(RC); 1516 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); 1517 unsigned ZeroReg = TargetMaterializeConstant(Zero); 1518 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. 1519 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg) 1520 .addReg(ZeroReg).addImm(1) 1521 .addImm(ARMPred).addReg(ARM::CPSR); 1522 1523 UpdateValueMap(I, DestReg); 1524 return true; 1525} 1526 1527bool ARMFastISel::SelectFPExt(const Instruction *I) { 1528 // Make sure we have VFP and that we're extending float to double. 1529 if (!Subtarget->hasVFP2()) return false; 1530 1531 Value *V = I->getOperand(0); 1532 if (!I->getType()->isDoubleTy() || 1533 !V->getType()->isFloatTy()) return false; 1534 1535 unsigned Op = getRegForValue(V); 1536 if (Op == 0) return false; 1537 1538 unsigned Result = createResultReg(&ARM::DPRRegClass); 1539 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1540 TII.get(ARM::VCVTDS), Result) 1541 .addReg(Op)); 1542 UpdateValueMap(I, Result); 1543 return true; 1544} 1545 1546bool ARMFastISel::SelectFPTrunc(const Instruction *I) { 1547 // Make sure we have VFP and that we're truncating double to float. 1548 if (!Subtarget->hasVFP2()) return false; 1549 1550 Value *V = I->getOperand(0); 1551 if (!(I->getType()->isFloatTy() && 1552 V->getType()->isDoubleTy())) return false; 1553 1554 unsigned Op = getRegForValue(V); 1555 if (Op == 0) return false; 1556 1557 unsigned Result = createResultReg(&ARM::SPRRegClass); 1558 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1559 TII.get(ARM::VCVTSD), Result) 1560 .addReg(Op)); 1561 UpdateValueMap(I, Result); 1562 return true; 1563} 1564 1565bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { 1566 // Make sure we have VFP. 1567 if (!Subtarget->hasVFP2()) return false; 1568 1569 MVT DstVT; 1570 Type *Ty = I->getType(); 1571 if (!isTypeLegal(Ty, DstVT)) 1572 return false; 1573 1574 Value *Src = I->getOperand(0); 1575 EVT SrcEVT = TLI.getValueType(Src->getType(), true); 1576 if (!SrcEVT.isSimple()) 1577 return false; 1578 MVT SrcVT = SrcEVT.getSimpleVT(); 1579 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 1580 return false; 1581 1582 unsigned SrcReg = getRegForValue(Src); 1583 if (SrcReg == 0) return false; 1584 1585 // Handle sign-extension. 1586 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { 1587 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32, 1588 /*isZExt*/!isSigned); 1589 if (SrcReg == 0) return false; 1590 } 1591 1592 // The conversion routine works on fp-reg to fp-reg and the operand above 1593 // was an integer, move it to the fp registers if possible. 1594 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg); 1595 if (FP == 0) return false; 1596 1597 unsigned Opc; 1598 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS; 1599 else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD; 1600 else return false; 1601 1602 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); 1603 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1604 TII.get(Opc), ResultReg).addReg(FP)); 1605 UpdateValueMap(I, ResultReg); 1606 return true; 1607} 1608 1609bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { 1610 // Make sure we have VFP. 
1611 if (!Subtarget->hasVFP2()) return false; 1612 1613 MVT DstVT; 1614 Type *RetTy = I->getType(); 1615 if (!isTypeLegal(RetTy, DstVT)) 1616 return false; 1617 1618 unsigned Op = getRegForValue(I->getOperand(0)); 1619 if (Op == 0) return false; 1620 1621 unsigned Opc; 1622 Type *OpTy = I->getOperand(0)->getType(); 1623 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; 1624 else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD; 1625 else return false; 1626 1627 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. 1628 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1629 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1630 TII.get(Opc), ResultReg).addReg(Op)); 1631 1632 // This result needs to be in an integer register, but the conversion only 1633 // takes place in fp-regs. 1634 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg); 1635 if (IntReg == 0) return false; 1636 1637 UpdateValueMap(I, IntReg); 1638 return true; 1639} 1640 1641bool ARMFastISel::SelectSelect(const Instruction *I) { 1642 MVT VT; 1643 if (!isTypeLegal(I->getType(), VT)) 1644 return false; 1645 1646 // Things need to be register sized for register moves. 1647 if (VT != MVT::i32) return false; 1648 1649 unsigned CondReg = getRegForValue(I->getOperand(0)); 1650 if (CondReg == 0) return false; 1651 unsigned Op1Reg = getRegForValue(I->getOperand(1)); 1652 if (Op1Reg == 0) return false; 1653 1654 // Check to see if we can use an immediate in the conditional move. 1655 int Imm = 0; 1656 bool UseImm = false; 1657 bool isNegativeImm = false; 1658 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) { 1659 assert (VT == MVT::i32 && "Expecting an i32."); 1660 Imm = (int)ConstInt->getValue().getZExtValue(); 1661 if (Imm < 0) { 1662 isNegativeImm = true; 1663 Imm = ~Imm; 1664 } 1665 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1666 (ARM_AM::getSOImmVal(Imm) != -1); 1667 } 1668 1669 unsigned Op2Reg = 0; 1670 if (!UseImm) { 1671 Op2Reg = getRegForValue(I->getOperand(2)); 1672 if (Op2Reg == 0) return false; 1673 } 1674 1675 unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri; 1676 CondReg = constrainOperandRegClass(TII.get(CmpOpc), CondReg, 0); 1677 AddOptionalDefs( 1678 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc)) 1679 .addReg(CondReg) 1680 .addImm(0)); 1681 1682 unsigned MovCCOpc; 1683 const TargetRegisterClass *RC; 1684 if (!UseImm) { 1685 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 1686 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr; 1687 } else { 1688 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass; 1689 if (!isNegativeImm) 1690 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1691 else 1692 MovCCOpc = isThumb2 ? 
ARM::t2MVNCCi : ARM::MVNCCi;
1693 }
1694 unsigned ResultReg = createResultReg(RC);
1695 if (!UseImm) {
1696 Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
1697 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
1698 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
1699 ResultReg)
1700 .addReg(Op2Reg)
1701 .addReg(Op1Reg)
1702 .addImm(ARMCC::NE)
1703 .addReg(ARM::CPSR);
1704 } else {
1705 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
1706 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
1707 ResultReg)
1708 .addReg(Op1Reg)
1709 .addImm(Imm)
1710 .addImm(ARMCC::EQ)
1711 .addReg(ARM::CPSR);
1712 }
1713 UpdateValueMap(I, ResultReg);
1714 return true;
1715}
1716
1717bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
1718 MVT VT;
1719 Type *Ty = I->getType();
1720 if (!isTypeLegal(Ty, VT))
1721 return false;
1722
1723 // If we have integer div support we should have selected this automagically.
1724 // In case we have a real miss, go ahead and return false and we'll pick
1725 // it up later.
1726 if (Subtarget->hasDivide()) return false;
1727
1728 // Otherwise emit a libcall.
1729 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1730 if (VT == MVT::i8)
1731 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1732 else if (VT == MVT::i16)
1733 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1734 else if (VT == MVT::i32)
1735 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1736 else if (VT == MVT::i64)
1737 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1738 else if (VT == MVT::i128)
1739 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1740 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
1741
1742 return ARMEmitLibcall(I, LC);
1743}
1744
1745bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
1746 MVT VT;
1747 Type *Ty = I->getType();
1748 if (!isTypeLegal(Ty, VT))
1749 return false;
1750
1751 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1752 if (VT == MVT::i8)
1753 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1754 else if (VT == MVT::i16)
1755 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1756 else if (VT == MVT::i32)
1757 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1758 else if (VT == MVT::i64)
1759 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1760 else if (VT == MVT::i128)
1761 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1762 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
1763
1764 return ARMEmitLibcall(I, LC);
1765}
1766
1767bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
1768 EVT DestVT = TLI.getValueType(I->getType(), true);
1769
1770 // We can get here in the case when we have a binary operation on a non-legal
1771 // type and the target independent selector doesn't know how to handle it.
1772 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1773 return false;
1774
1775 unsigned Opc;
1776 switch (ISDOpcode) {
1777 default: return false;
1778 case ISD::ADD:
1779 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1780 break;
1781 case ISD::OR:
1782 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1783 break;
1784 case ISD::SUB:
1785 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1786 break;
1787 }
1788
1789 unsigned SrcReg1 = getRegForValue(I->getOperand(0));
1790 if (SrcReg1 == 0) return false;
1791
1792 // TODO: Often the 2nd operand is an immediate, which can be encoded directly
1793 // in the instruction, rather than materializing the value in a register.
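// Illustratively, for "%r = add i8 %x, 1" the constant 1 is currently
// materialized into its own register and then added; an immediate encoding
// such as ADDri ("add rD, rN, #1") would save that extra instruction.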
1794 unsigned SrcReg2 = getRegForValue(I->getOperand(1)); 1795 if (SrcReg2 == 0) return false; 1796 1797 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 1798 SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1); 1799 SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2); 1800 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1801 TII.get(Opc), ResultReg) 1802 .addReg(SrcReg1).addReg(SrcReg2)); 1803 UpdateValueMap(I, ResultReg); 1804 return true; 1805} 1806 1807bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { 1808 EVT FPVT = TLI.getValueType(I->getType(), true); 1809 if (!FPVT.isSimple()) return false; 1810 MVT VT = FPVT.getSimpleVT(); 1811 1812 // We can get here in the case when we want to use NEON for our fp 1813 // operations, but can't figure out how to. Just use the vfp instructions 1814 // if we have them. 1815 // FIXME: It'd be nice to use NEON instructions. 1816 Type *Ty = I->getType(); 1817 bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy()); 1818 if (isFloat && !Subtarget->hasVFP2()) 1819 return false; 1820 1821 unsigned Opc; 1822 bool is64bit = VT == MVT::f64 || VT == MVT::i64; 1823 switch (ISDOpcode) { 1824 default: return false; 1825 case ISD::FADD: 1826 Opc = is64bit ? ARM::VADDD : ARM::VADDS; 1827 break; 1828 case ISD::FSUB: 1829 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS; 1830 break; 1831 case ISD::FMUL: 1832 Opc = is64bit ? ARM::VMULD : ARM::VMULS; 1833 break; 1834 } 1835 unsigned Op1 = getRegForValue(I->getOperand(0)); 1836 if (Op1 == 0) return false; 1837 1838 unsigned Op2 = getRegForValue(I->getOperand(1)); 1839 if (Op2 == 0) return false; 1840 1841 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); 1842 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1843 TII.get(Opc), ResultReg) 1844 .addReg(Op1).addReg(Op2)); 1845 UpdateValueMap(I, ResultReg); 1846 return true; 1847} 1848 1849// Call Handling Code 1850 1851// This is largely taken directly from CCAssignFnForNode 1852// TODO: We may not support all of this. 1853CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, 1854 bool Return, 1855 bool isVarArg) { 1856 switch (CC) { 1857 default: 1858 llvm_unreachable("Unsupported calling convention"); 1859 case CallingConv::Fast: 1860 if (Subtarget->hasVFP2() && !isVarArg) { 1861 if (!Subtarget->isAAPCS_ABI()) 1862 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1863 // For AAPCS ABI targets, just use VFP variant of the calling convention. 1864 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1865 } 1866 // Fallthrough 1867 case CallingConv::C: 1868 // Use target triple & subtarget features to do actual dispatch. 1869 if (Subtarget->isAAPCS_ABI()) { 1870 if (Subtarget->hasVFP2() && 1871 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) 1872 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1873 else 1874 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1875 } else 1876 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1877 case CallingConv::ARM_AAPCS_VFP: 1878 if (!isVarArg) 1879 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1880 // Fall through to soft float variant, variadic functions don't 1881 // use hard floating point ABI. 1882 case CallingConv::ARM_AAPCS: 1883 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1884 case CallingConv::ARM_APCS: 1885 return (Return ? 
RetCC_ARM_APCS: CC_ARM_APCS);
1886 case CallingConv::GHC:
1887 if (Return)
1888 llvm_unreachable("Can't return in GHC call convention");
1889 else
1890 return CC_ARM_APCS_GHC;
1891 }
1892}
1893
1894bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1895 SmallVectorImpl<unsigned> &ArgRegs,
1896 SmallVectorImpl<MVT> &ArgVTs,
1897 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1898 SmallVectorImpl<unsigned> &RegArgs,
1899 CallingConv::ID CC,
1900 unsigned &NumBytes,
1901 bool isVarArg) {
1902 SmallVector<CCValAssign, 16> ArgLocs;
1903 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
1904 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
1905 CCAssignFnForCall(CC, false, isVarArg));
1906
1907 // Check that we can handle all of the arguments. If we can't, then bail out
1908 // now before we add code to the MBB.
1909 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1910 CCValAssign &VA = ArgLocs[i];
1911 MVT ArgVT = ArgVTs[VA.getValNo()];
1912
1913 // We don't handle NEON/vector parameters yet.
1914 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
1915 return false;
1916
1917 // Register args that don't need custom lowering require no further checks.
1918 if (VA.isRegLoc() && !VA.needsCustom()) {
1919 continue;
1920 } else if (VA.needsCustom()) {
1921 // TODO: We need custom lowering for vector (v2f64) args.
1922 if (VA.getLocVT() != MVT::f64 ||
1923 // TODO: Only handle register args for now.
1924 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
1925 return false;
1926 } else {
1927 switch (ArgVT.SimpleTy) {
1928 default:
1929 return false;
1930 case MVT::i1:
1931 case MVT::i8:
1932 case MVT::i16:
1933 case MVT::i32:
1934 break;
1935 case MVT::f32:
1936 if (!Subtarget->hasVFP2())
1937 return false;
1938 break;
1939 case MVT::f64:
1940 if (!Subtarget->hasVFP2())
1941 return false;
1942 break;
1943 }
1944 }
1945 }
1946
1947 // At this point, we are able to handle the call's arguments in fast isel.
1948
1949 // Get a count of how many bytes are to be pushed on the stack.
1950 NumBytes = CCInfo.getNextStackOffset();
1951
1952 // Issue CALLSEQ_START
1953 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1954 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1955 TII.get(AdjStackDown))
1956 .addImm(NumBytes));
1957
1958 // Process the args.
1959 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1960 CCValAssign &VA = ArgLocs[i];
1961 unsigned Arg = ArgRegs[VA.getValNo()];
1962 MVT ArgVT = ArgVTs[VA.getValNo()];
1963
1964 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
1965 "We don't handle NEON/vector parameters yet.");
1966
1967 // Handle arg promotion, etc.
1968 switch (VA.getLocInfo()) {
1969 case CCValAssign::Full: break;
1970 case CCValAssign::SExt: {
1971 MVT DestVT = VA.getLocVT();
1972 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
1973 assert (Arg != 0 && "Failed to emit a sext");
1974 ArgVT = DestVT;
1975 break;
1976 }
1977 case CCValAssign::AExt:
1978 // Intentional fall-through. Handle AExt and ZExt.
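// (AExt only requires the upper bits to be unspecified, so zero-extending
// exactly as for ZExt below is a safe way to satisfy it.)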
1979 case CCValAssign::ZExt: { 1980 MVT DestVT = VA.getLocVT(); 1981 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true); 1982 assert (Arg != 0 && "Failed to emit a zext"); 1983 ArgVT = DestVT; 1984 break; 1985 } 1986 case CCValAssign::BCvt: { 1987 unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, 1988 /*TODO: Kill=*/false); 1989 assert(BC != 0 && "Failed to emit a bitcast!"); 1990 Arg = BC; 1991 ArgVT = VA.getLocVT(); 1992 break; 1993 } 1994 default: llvm_unreachable("Unknown arg promotion!"); 1995 } 1996 1997 // Now copy/store arg to correct locations. 1998 if (VA.isRegLoc() && !VA.needsCustom()) { 1999 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2000 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg); 2001 RegArgs.push_back(VA.getLocReg()); 2002 } else if (VA.needsCustom()) { 2003 // TODO: We need custom lowering for vector (v2f64) args. 2004 assert(VA.getLocVT() == MVT::f64 && 2005 "Custom lowering for v2f64 args not available"); 2006 2007 CCValAssign &NextVA = ArgLocs[++i]; 2008 2009 assert(VA.isRegLoc() && NextVA.isRegLoc() && 2010 "We only handle register args!"); 2011 2012 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2013 TII.get(ARM::VMOVRRD), VA.getLocReg()) 2014 .addReg(NextVA.getLocReg(), RegState::Define) 2015 .addReg(Arg)); 2016 RegArgs.push_back(VA.getLocReg()); 2017 RegArgs.push_back(NextVA.getLocReg()); 2018 } else { 2019 assert(VA.isMemLoc()); 2020 // Need to store on the stack. 2021 Address Addr; 2022 Addr.BaseType = Address::RegBase; 2023 Addr.Base.Reg = ARM::SP; 2024 Addr.Offset = VA.getLocMemOffset(); 2025 2026 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; 2027 assert(EmitRet && "Could not emit a store for argument!"); 2028 } 2029 } 2030 2031 return true; 2032} 2033 2034bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 2035 const Instruction *I, CallingConv::ID CC, 2036 unsigned &NumBytes, bool isVarArg) { 2037 // Issue CALLSEQ_END 2038 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 2039 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2040 TII.get(AdjStackUp)) 2041 .addImm(NumBytes).addImm(0)); 2042 2043 // Now the return value. 2044 if (RetVT != MVT::isVoid) { 2045 SmallVector<CCValAssign, 16> RVLocs; 2046 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2047 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2048 2049 // Copy all of the result registers out of their specified physreg. 2050 if (RVLocs.size() == 2 && RetVT == MVT::f64) { 2051 // For this move we copy into two registers and then move into the 2052 // double fp reg we want. 2053 MVT DestVT = RVLocs[0].getValVT(); 2054 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 2055 unsigned ResultReg = createResultReg(DstRC); 2056 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2057 TII.get(ARM::VMOVDRR), ResultReg) 2058 .addReg(RVLocs[0].getLocReg()) 2059 .addReg(RVLocs[1].getLocReg())); 2060 2061 UsedRegs.push_back(RVLocs[0].getLocReg()); 2062 UsedRegs.push_back(RVLocs[1].getLocReg()); 2063 2064 // Finally update the result. 2065 UpdateValueMap(I, ResultReg); 2066 } else { 2067 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 2068 MVT CopyVT = RVLocs[0].getValVT(); 2069 2070 // Special handling for extended integers. 
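// Illustrative example: a callee declared as returning i8 still hands the
// value back in the low bits of r0, so the copy below is widened to i32 and
// later users re-narrow the result as needed.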
2071 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) 2072 CopyVT = MVT::i32; 2073 2074 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 2075 2076 unsigned ResultReg = createResultReg(DstRC); 2077 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2078 TII.get(TargetOpcode::COPY), 2079 ResultReg).addReg(RVLocs[0].getLocReg()); 2080 UsedRegs.push_back(RVLocs[0].getLocReg()); 2081 2082 // Finally update the result. 2083 UpdateValueMap(I, ResultReg); 2084 } 2085 } 2086 2087 return true; 2088} 2089 2090bool ARMFastISel::SelectRet(const Instruction *I) { 2091 const ReturnInst *Ret = cast<ReturnInst>(I); 2092 const Function &F = *I->getParent()->getParent(); 2093 2094 if (!FuncInfo.CanLowerReturn) 2095 return false; 2096 2097 // Build a list of return value registers. 2098 SmallVector<unsigned, 4> RetRegs; 2099 2100 CallingConv::ID CC = F.getCallingConv(); 2101 if (Ret->getNumOperands() > 0) { 2102 SmallVector<ISD::OutputArg, 4> Outs; 2103 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI); 2104 2105 // Analyze operands of the call, assigning locations to each operand. 2106 SmallVector<CCValAssign, 16> ValLocs; 2107 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext()); 2108 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */, 2109 F.isVarArg())); 2110 2111 const Value *RV = Ret->getOperand(0); 2112 unsigned Reg = getRegForValue(RV); 2113 if (Reg == 0) 2114 return false; 2115 2116 // Only handle a single return value for now. 2117 if (ValLocs.size() != 1) 2118 return false; 2119 2120 CCValAssign &VA = ValLocs[0]; 2121 2122 // Don't bother handling odd stuff for now. 2123 if (VA.getLocInfo() != CCValAssign::Full) 2124 return false; 2125 // Only handle register returns for now. 2126 if (!VA.isRegLoc()) 2127 return false; 2128 2129 unsigned SrcReg = Reg + VA.getValNo(); 2130 EVT RVEVT = TLI.getValueType(RV->getType()); 2131 if (!RVEVT.isSimple()) return false; 2132 MVT RVVT = RVEVT.getSimpleVT(); 2133 MVT DestVT = VA.getValVT(); 2134 // Special handling for extended integers. 2135 if (RVVT != DestVT) { 2136 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 2137 return false; 2138 2139 assert(DestVT == MVT::i32 && "ARM should always ext to i32"); 2140 2141 // Perform extension if flagged as either zext or sext. Otherwise, do 2142 // nothing. 2143 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { 2144 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); 2145 if (SrcReg == 0) return false; 2146 } 2147 } 2148 2149 // Make the copy. 2150 unsigned DstReg = VA.getLocReg(); 2151 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 2152 // Avoid a cross-class copy. This is very unlikely. 2153 if (!SrcRC->contains(DstReg)) 2154 return false; 2155 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2156 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); 2157 2158 // Add register to return instruction. 2159 RetRegs.push_back(VA.getLocReg()); 2160 } 2161 2162 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET; 2163 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2164 TII.get(RetOpc)); 2165 AddOptionalDefs(MIB); 2166 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) 2167 MIB.addReg(RetRegs[i], RegState::Implicit); 2168 return true; 2169} 2170 2171unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { 2172 if (UseReg) 2173 return isThumb2 ? ARM::tBLXr : ARM::BLX; 2174 else 2175 return isThumb2 ? 
ARM::tBL : ARM::BL; 2176} 2177 2178unsigned ARMFastISel::getLibcallReg(const Twine &Name) { 2179 // Manually compute the global's type to avoid building it when unnecessary. 2180 Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0); 2181 EVT LCREVT = TLI.getValueType(GVTy); 2182 if (!LCREVT.isSimple()) return 0; 2183 2184 GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false, 2185 GlobalValue::ExternalLinkage, 0, Name); 2186 assert(GV->getType() == GVTy && "We miscomputed the type for the global!"); 2187 return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); 2188} 2189 2190// A quick function that will emit a call for a named libcall in F with the 2191// vector of passed arguments for the Instruction in I. We can assume that we 2192// can emit a call for any libcall we can produce. This is an abridged version 2193// of the full call infrastructure since we won't need to worry about things 2194// like computed function pointers or strange arguments at call sites. 2195// TODO: Try to unify this and the normal call bits for ARM, then try to unify 2196// with X86. 2197bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { 2198 CallingConv::ID CC = TLI.getLibcallCallingConv(Call); 2199 2200 // Handle *simple* calls for now. 2201 Type *RetTy = I->getType(); 2202 MVT RetVT; 2203 if (RetTy->isVoidTy()) 2204 RetVT = MVT::isVoid; 2205 else if (!isTypeLegal(RetTy, RetVT)) 2206 return false; 2207 2208 // Can't handle non-double multi-reg retvals. 2209 if (RetVT != MVT::isVoid && RetVT != MVT::i32) { 2210 SmallVector<CCValAssign, 16> RVLocs; 2211 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); 2212 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false)); 2213 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2214 return false; 2215 } 2216 2217 // Set up the argument vectors. 2218 SmallVector<Value*, 8> Args; 2219 SmallVector<unsigned, 8> ArgRegs; 2220 SmallVector<MVT, 8> ArgVTs; 2221 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2222 Args.reserve(I->getNumOperands()); 2223 ArgRegs.reserve(I->getNumOperands()); 2224 ArgVTs.reserve(I->getNumOperands()); 2225 ArgFlags.reserve(I->getNumOperands()); 2226 for (unsigned i = 0; i < I->getNumOperands(); ++i) { 2227 Value *Op = I->getOperand(i); 2228 unsigned Arg = getRegForValue(Op); 2229 if (Arg == 0) return false; 2230 2231 Type *ArgTy = Op->getType(); 2232 MVT ArgVT; 2233 if (!isTypeLegal(ArgTy, ArgVT)) return false; 2234 2235 ISD::ArgFlagsTy Flags; 2236 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); 2237 Flags.setOrigAlign(OriginalAlignment); 2238 2239 Args.push_back(Op); 2240 ArgRegs.push_back(Arg); 2241 ArgVTs.push_back(ArgVT); 2242 ArgFlags.push_back(Flags); 2243 } 2244 2245 // Handle the arguments now that we've gotten them. 2246 SmallVector<unsigned, 4> RegArgs; 2247 unsigned NumBytes; 2248 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2249 RegArgs, CC, NumBytes, false)) 2250 return false; 2251 2252 unsigned CalleeReg = 0; 2253 if (EnableARMLongCalls) { 2254 CalleeReg = getLibcallReg(TLI.getLibcallName(Call)); 2255 if (CalleeReg == 0) return false; 2256 } 2257 2258 // Issue the call. 2259 unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls); 2260 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2261 DbgLoc, TII.get(CallOpc)); 2262 // BL / BLX don't take a predicate, but tBL / tBLX do. 
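// Sketch of the ARM-mode call this produces for, e.g., a 32-bit signed
// divide libcall (the AEABI name applies to AAPCS targets; the register in
// the long-call form is illustrative):
//   bl  __aeabi_idiv      @ normal case: direct call to the named libcall
//   blx r3                @ long-call case: address from getLibcallReg above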
2263 if (isThumb2) 2264 AddDefaultPred(MIB); 2265 if (EnableARMLongCalls) 2266 MIB.addReg(CalleeReg); 2267 else 2268 MIB.addExternalSymbol(TLI.getLibcallName(Call)); 2269 2270 // Add implicit physical register uses to the call. 2271 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2272 MIB.addReg(RegArgs[i], RegState::Implicit); 2273 2274 // Add a register mask with the call-preserved registers. 2275 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2276 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2277 2278 // Finish off the call including any return values. 2279 SmallVector<unsigned, 4> UsedRegs; 2280 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false; 2281 2282 // Set all unused physreg defs as dead. 2283 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2284 2285 return true; 2286} 2287 2288bool ARMFastISel::SelectCall(const Instruction *I, 2289 const char *IntrMemName = 0) { 2290 const CallInst *CI = cast<CallInst>(I); 2291 const Value *Callee = CI->getCalledValue(); 2292 2293 // Can't handle inline asm. 2294 if (isa<InlineAsm>(Callee)) return false; 2295 2296 // Allow SelectionDAG isel to handle tail calls. 2297 if (CI->isTailCall()) return false; 2298 2299 // Check the calling convention. 2300 ImmutableCallSite CS(CI); 2301 CallingConv::ID CC = CS.getCallingConv(); 2302 2303 // TODO: Avoid some calling conventions? 2304 2305 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 2306 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 2307 bool isVarArg = FTy->isVarArg(); 2308 2309 // Handle *simple* calls for now. 2310 Type *RetTy = I->getType(); 2311 MVT RetVT; 2312 if (RetTy->isVoidTy()) 2313 RetVT = MVT::isVoid; 2314 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && 2315 RetVT != MVT::i8 && RetVT != MVT::i1) 2316 return false; 2317 2318 // Can't handle non-double multi-reg retvals. 2319 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && 2320 RetVT != MVT::i16 && RetVT != MVT::i32) { 2321 SmallVector<CCValAssign, 16> RVLocs; 2322 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2323 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2324 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2325 return false; 2326 } 2327 2328 // Set up the argument vectors. 2329 SmallVector<Value*, 8> Args; 2330 SmallVector<unsigned, 8> ArgRegs; 2331 SmallVector<MVT, 8> ArgVTs; 2332 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2333 unsigned arg_size = CS.arg_size(); 2334 Args.reserve(arg_size); 2335 ArgRegs.reserve(arg_size); 2336 ArgVTs.reserve(arg_size); 2337 ArgFlags.reserve(arg_size); 2338 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 2339 i != e; ++i) { 2340 // If we're lowering a memory intrinsic instead of a regular call, skip the 2341 // last two arguments, which shouldn't be passed to the underlying function. 2342 if (IntrMemName && e-i <= 2) 2343 break; 2344 2345 ISD::ArgFlagsTy Flags; 2346 unsigned AttrInd = i - CS.arg_begin() + 1; 2347 if (CS.paramHasAttr(AttrInd, Attribute::SExt)) 2348 Flags.setSExt(); 2349 if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) 2350 Flags.setZExt(); 2351 2352 // FIXME: Only handle *easy* calls for now. 
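// For instance, a hypothetical call such as
//   call void @takes_sret(%struct.S* sret %out)
// is rejected here and left to SelectionDAG; only plainly passed scalar
// arguments survive the attribute checks below.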
2353 if (CS.paramHasAttr(AttrInd, Attribute::InReg) || 2354 CS.paramHasAttr(AttrInd, Attribute::StructRet) || 2355 CS.paramHasAttr(AttrInd, Attribute::Nest) || 2356 CS.paramHasAttr(AttrInd, Attribute::ByVal)) 2357 return false; 2358 2359 Type *ArgTy = (*i)->getType(); 2360 MVT ArgVT; 2361 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && 2362 ArgVT != MVT::i1) 2363 return false; 2364 2365 unsigned Arg = getRegForValue(*i); 2366 if (Arg == 0) 2367 return false; 2368 2369 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); 2370 Flags.setOrigAlign(OriginalAlignment); 2371 2372 Args.push_back(*i); 2373 ArgRegs.push_back(Arg); 2374 ArgVTs.push_back(ArgVT); 2375 ArgFlags.push_back(Flags); 2376 } 2377 2378 // Handle the arguments now that we've gotten them. 2379 SmallVector<unsigned, 4> RegArgs; 2380 unsigned NumBytes; 2381 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2382 RegArgs, CC, NumBytes, isVarArg)) 2383 return false; 2384 2385 bool UseReg = false; 2386 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 2387 if (!GV || EnableARMLongCalls) UseReg = true; 2388 2389 unsigned CalleeReg = 0; 2390 if (UseReg) { 2391 if (IntrMemName) 2392 CalleeReg = getLibcallReg(IntrMemName); 2393 else 2394 CalleeReg = getRegForValue(Callee); 2395 2396 if (CalleeReg == 0) return false; 2397 } 2398 2399 // Issue the call. 2400 unsigned CallOpc = ARMSelectCallOp(UseReg); 2401 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2402 DbgLoc, TII.get(CallOpc)); 2403 2404 unsigned char OpFlags = 0; 2405 2406 // Add MO_PLT for global address or external symbol in the PIC relocation 2407 // model. 2408 if (Subtarget->isTargetELF() && TM.getRelocationModel() == Reloc::PIC_) 2409 OpFlags = ARMII::MO_PLT; 2410 2411 // ARM calls don't take a predicate, but tBL / tBLX do. 2412 if(isThumb2) 2413 AddDefaultPred(MIB); 2414 if (UseReg) 2415 MIB.addReg(CalleeReg); 2416 else if (!IntrMemName) 2417 MIB.addGlobalAddress(GV, 0, OpFlags); 2418 else 2419 MIB.addExternalSymbol(IntrMemName, OpFlags); 2420 2421 // Add implicit physical register uses to the call. 2422 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2423 MIB.addReg(RegArgs[i], RegState::Implicit); 2424 2425 // Add a register mask with the call-preserved registers. 2426 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2427 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2428 2429 // Finish off the call including any return values. 2430 SmallVector<unsigned, 4> UsedRegs; 2431 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg)) 2432 return false; 2433 2434 // Set all unused physreg defs as dead. 2435 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2436 2437 return true; 2438} 2439 2440bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { 2441 return Len <= 16; 2442} 2443 2444bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, 2445 uint64_t Len, unsigned Alignment) { 2446 // Make sure we don't bloat code by inlining very large memcpy's. 2447 if (!ARMIsMemCpySmall(Len)) 2448 return false; 2449 2450 while (Len) { 2451 MVT VT; 2452 if (!Alignment || Alignment >= 4) { 2453 if (Len >= 4) 2454 VT = MVT::i32; 2455 else if (Len >= 2) 2456 VT = MVT::i16; 2457 else { 2458 assert (Len == 1 && "Expected a length of 1!"); 2459 VT = MVT::i8; 2460 } 2461 } else { 2462 // Bound based on alignment. 
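// Worked example: with 2-byte alignment a 7-byte copy is split by this
// loop into three i16 load/store pairs followed by a single i8 pair.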
2463 if (Len >= 2 && Alignment == 2) 2464 VT = MVT::i16; 2465 else { 2466 VT = MVT::i8; 2467 } 2468 } 2469 2470 bool RV; 2471 unsigned ResultReg; 2472 RV = ARMEmitLoad(VT, ResultReg, Src); 2473 assert (RV == true && "Should be able to handle this load."); 2474 RV = ARMEmitStore(VT, ResultReg, Dest); 2475 assert (RV == true && "Should be able to handle this store."); 2476 (void)RV; 2477 2478 unsigned Size = VT.getSizeInBits()/8; 2479 Len -= Size; 2480 Dest.Offset += Size; 2481 Src.Offset += Size; 2482 } 2483 2484 return true; 2485} 2486 2487bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 2488 // FIXME: Handle more intrinsics. 2489 switch (I.getIntrinsicID()) { 2490 default: return false; 2491 case Intrinsic::frameaddress: { 2492 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo(); 2493 MFI->setFrameAddressIsTaken(true); 2494 2495 unsigned LdrOpc; 2496 const TargetRegisterClass *RC; 2497 if (isThumb2) { 2498 LdrOpc = ARM::t2LDRi12; 2499 RC = (const TargetRegisterClass*)&ARM::tGPRRegClass; 2500 } else { 2501 LdrOpc = ARM::LDRi12; 2502 RC = (const TargetRegisterClass*)&ARM::GPRRegClass; 2503 } 2504 2505 const ARMBaseRegisterInfo *RegInfo = 2506 static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo()); 2507 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); 2508 unsigned SrcReg = FramePtr; 2509 2510 // Recursively load frame address 2511 // ldr r0 [fp] 2512 // ldr r0 [r0] 2513 // ldr r0 [r0] 2514 // ... 2515 unsigned DestReg; 2516 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue(); 2517 while (Depth--) { 2518 DestReg = createResultReg(RC); 2519 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2520 TII.get(LdrOpc), DestReg) 2521 .addReg(SrcReg).addImm(0)); 2522 SrcReg = DestReg; 2523 } 2524 UpdateValueMap(&I, SrcReg); 2525 return true; 2526 } 2527 case Intrinsic::memcpy: 2528 case Intrinsic::memmove: { 2529 const MemTransferInst &MTI = cast<MemTransferInst>(I); 2530 // Don't handle volatile. 2531 if (MTI.isVolatile()) 2532 return false; 2533 2534 // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 2535 // we would emit dead code because we don't currently handle memmoves. 2536 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 2537 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 2538 // Small memcpy's are common enough that we want to do them without a call 2539 // if possible. 2540 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 2541 if (ARMIsMemCpySmall(Len)) { 2542 Address Dest, Src; 2543 if (!ARMComputeAddress(MTI.getRawDest(), Dest) || 2544 !ARMComputeAddress(MTI.getRawSource(), Src)) 2545 return false; 2546 unsigned Alignment = MTI.getAlignment(); 2547 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment)) 2548 return true; 2549 } 2550 } 2551 2552 if (!MTI.getLength()->getType()->isIntegerTy(32)) 2553 return false; 2554 2555 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 2556 return false; 2557 2558 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 2559 return SelectCall(&I, IntrMemName); 2560 } 2561 case Intrinsic::memset: { 2562 const MemSetInst &MSI = cast<MemSetInst>(I); 2563 // Don't handle volatile. 
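// Note that, unlike the memcpy path above, there is no small-copy fast path
// here; memset is always lowered via SelectCall as a plain "memset" call.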
2564 if (MSI.isVolatile()) 2565 return false; 2566 2567 if (!MSI.getLength()->getType()->isIntegerTy(32)) 2568 return false; 2569 2570 if (MSI.getDestAddressSpace() > 255) 2571 return false; 2572 2573 return SelectCall(&I, "memset"); 2574 } 2575 case Intrinsic::trap: { 2576 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get( 2577 Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP)); 2578 return true; 2579 } 2580 } 2581} 2582 2583bool ARMFastISel::SelectTrunc(const Instruction *I) { 2584 // The high bits for a type smaller than the register size are assumed to be 2585 // undefined. 2586 Value *Op = I->getOperand(0); 2587 2588 EVT SrcVT, DestVT; 2589 SrcVT = TLI.getValueType(Op->getType(), true); 2590 DestVT = TLI.getValueType(I->getType(), true); 2591 2592 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 2593 return false; 2594 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 2595 return false; 2596 2597 unsigned SrcReg = getRegForValue(Op); 2598 if (!SrcReg) return false; 2599 2600 // Because the high bits are undefined, a truncate doesn't generate 2601 // any code. 2602 UpdateValueMap(I, SrcReg); 2603 return true; 2604} 2605 2606unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, 2607 bool isZExt) { 2608 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) 2609 return 0; 2610 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1) 2611 return 0; 2612 2613 // Table of which combinations can be emitted as a single instruction, 2614 // and which will require two. 2615 static const uint8_t isSingleInstrTbl[3][2][2][2] = { 2616 // ARM Thumb 2617 // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops 2618 // ext: s z s z s z s z 2619 /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } }, 2620 /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }, 2621 /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } } 2622 }; 2623 2624 // Target registers for: 2625 // - For ARM can never be PC. 2626 // - For 16-bit Thumb are restricted to lower 8 registers. 2627 // - For 32-bit Thumb are restricted to non-SP and non-PC. 2628 static const TargetRegisterClass *RCTbl[2][2] = { 2629 // Instructions: Two Single 2630 /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass }, 2631 /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass } 2632 }; 2633 2634 // Table governing the instruction(s) to be emitted. 2635 static const struct InstructionTable { 2636 uint32_t Opc : 16; 2637 uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0. 2638 uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi. 2639 uint32_t Imm : 8; // All instructions have either a shift or a mask. 2640 } IT[2][2][3][2] = { 2641 { // Two instructions (first is left shift, second is in this table). 
2642 { // ARM Opc S Shift Imm 2643 /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 }, 2644 /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } }, 2645 /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 }, 2646 /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } }, 2647 /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 }, 2648 /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } } 2649 }, 2650 { // Thumb Opc S Shift Imm 2651 /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 }, 2652 /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } }, 2653 /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 }, 2654 /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } }, 2655 /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 }, 2656 /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } } 2657 } 2658 }, 2659 { // Single instruction. 2660 { // ARM Opc S Shift Imm 2661 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, 2662 /* 1 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 1 } }, 2663 /* 8 bit sext */ { { ARM::SXTB , 0, ARM_AM::no_shift, 0 }, 2664 /* 8 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 255 } }, 2665 /* 16 bit sext */ { { ARM::SXTH , 0, ARM_AM::no_shift, 0 }, 2666 /* 16 bit zext */ { ARM::UXTH , 0, ARM_AM::no_shift, 0 } } 2667 }, 2668 { // Thumb Opc S Shift Imm 2669 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, 2670 /* 1 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } }, 2671 /* 8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 }, 2672 /* 8 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } }, 2673 /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 }, 2674 /* 16 bit zext */ { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } } 2675 } 2676 } 2677 }; 2678 2679 unsigned SrcBits = SrcVT.getSizeInBits(); 2680 unsigned DestBits = DestVT.getSizeInBits(); 2681 (void) DestBits; 2682 assert((SrcBits < DestBits) && "can only extend to larger types"); 2683 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) && 2684 "other sizes unimplemented"); 2685 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) && 2686 "other sizes unimplemented"); 2687 2688 bool hasV6Ops = Subtarget->hasV6Ops(); 2689 unsigned Bitness = SrcBits / 8; // {1,8,16}=>{0,1,2} 2690 assert((Bitness < 3) && "sanity-check table bounds"); 2691 2692 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt]; 2693 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr]; 2694 const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt]; 2695 unsigned Opc = ITP->Opc; 2696 assert(ARM::KILL != Opc && "Invalid table entry"); 2697 unsigned hasS = ITP->hasS; 2698 ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift; 2699 assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) && 2700 "only MOVsi has shift operand addressing mode"); 2701 unsigned Imm = ITP->Imm; 2702 2703 // 16-bit Thumb instructions always set CPSR (unless they're in an IT block). 2704 bool setsCPSR = &ARM::tGPRRegClass == RC; 2705 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi; 2706 unsigned ResultReg; 2707 // MOVsi encodes shift and immediate in shift operand addressing mode. 2708 // The following condition has the same value when emitting two 2709 // instruction sequences: both are shifts. 2710 bool ImmIsSO = (Shift != ARM_AM::no_shift); 2711 2712 // Either one or two instructions are emitted. 2713 // They're always of the form: 2714 // dst = in OP imm 2715 // CPSR is set only by 16-bit Thumb instructions. 
2716 // Predicate, if any, is AL. 2717 // S bit, if available, is always 0. 2718 // When two are emitted the first's result will feed as the second's input, 2719 // that value is then dead. 2720 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2; 2721 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) { 2722 ResultReg = createResultReg(RC); 2723 bool isLsl = (0 == Instr) && !isSingleInstr; 2724 unsigned Opcode = isLsl ? LSLOpc : Opc; 2725 ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift; 2726 unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm; 2727 bool isKill = 1 == Instr; 2728 MachineInstrBuilder MIB = BuildMI( 2729 *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg); 2730 if (setsCPSR) 2731 MIB.addReg(ARM::CPSR, RegState::Define); 2732 SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR); 2733 AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc)); 2734 if (hasS) 2735 AddDefaultCC(MIB); 2736 // Second instruction consumes the first's result. 2737 SrcReg = ResultReg; 2738 } 2739 2740 return ResultReg; 2741} 2742 2743bool ARMFastISel::SelectIntExt(const Instruction *I) { 2744 // On ARM, in general, integer casts don't involve legal types; this code 2745 // handles promotable integers. 2746 Type *DestTy = I->getType(); 2747 Value *Src = I->getOperand(0); 2748 Type *SrcTy = Src->getType(); 2749 2750 bool isZExt = isa<ZExtInst>(I); 2751 unsigned SrcReg = getRegForValue(Src); 2752 if (!SrcReg) return false; 2753 2754 EVT SrcEVT, DestEVT; 2755 SrcEVT = TLI.getValueType(SrcTy, true); 2756 DestEVT = TLI.getValueType(DestTy, true); 2757 if (!SrcEVT.isSimple()) return false; 2758 if (!DestEVT.isSimple()) return false; 2759 2760 MVT SrcVT = SrcEVT.getSimpleVT(); 2761 MVT DestVT = DestEVT.getSimpleVT(); 2762 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt); 2763 if (ResultReg == 0) return false; 2764 UpdateValueMap(I, ResultReg); 2765 return true; 2766} 2767 2768bool ARMFastISel::SelectShift(const Instruction *I, 2769 ARM_AM::ShiftOpc ShiftTy) { 2770 // We handle thumb2 mode by target independent selector 2771 // or SelectionDAG ISel. 2772 if (isThumb2) 2773 return false; 2774 2775 // Only handle i32 now. 2776 EVT DestVT = TLI.getValueType(I->getType(), true); 2777 if (DestVT != MVT::i32) 2778 return false; 2779 2780 unsigned Opc = ARM::MOVsr; 2781 unsigned ShiftImm; 2782 Value *Src2Value = I->getOperand(1); 2783 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) { 2784 ShiftImm = CI->getZExtValue(); 2785 2786 // Fall back to selection DAG isel if the shift amount 2787 // is zero or greater than the width of the value type. 
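// For example, a constant shift like "shl i32 %x, 3" is emitted as a single
// MOVsi with an lsl #3 shifted-operand encoding, while a variable amount
// keeps the MOVsr form with the shift amount in a register.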
2788 if (ShiftImm == 0 || ShiftImm >=32) 2789 return false; 2790 2791 Opc = ARM::MOVsi; 2792 } 2793 2794 Value *Src1Value = I->getOperand(0); 2795 unsigned Reg1 = getRegForValue(Src1Value); 2796 if (Reg1 == 0) return false; 2797 2798 unsigned Reg2 = 0; 2799 if (Opc == ARM::MOVsr) { 2800 Reg2 = getRegForValue(Src2Value); 2801 if (Reg2 == 0) return false; 2802 } 2803 2804 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 2805 if(ResultReg == 0) return false; 2806 2807 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2808 TII.get(Opc), ResultReg) 2809 .addReg(Reg1); 2810 2811 if (Opc == ARM::MOVsi) 2812 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm)); 2813 else if (Opc == ARM::MOVsr) { 2814 MIB.addReg(Reg2); 2815 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0)); 2816 } 2817 2818 AddOptionalDefs(MIB); 2819 UpdateValueMap(I, ResultReg); 2820 return true; 2821} 2822 2823// TODO: SoftFP support. 2824bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { 2825 2826 switch (I->getOpcode()) { 2827 case Instruction::Load: 2828 return SelectLoad(I); 2829 case Instruction::Store: 2830 return SelectStore(I); 2831 case Instruction::Br: 2832 return SelectBranch(I); 2833 case Instruction::IndirectBr: 2834 return SelectIndirectBr(I); 2835 case Instruction::ICmp: 2836 case Instruction::FCmp: 2837 return SelectCmp(I); 2838 case Instruction::FPExt: 2839 return SelectFPExt(I); 2840 case Instruction::FPTrunc: 2841 return SelectFPTrunc(I); 2842 case Instruction::SIToFP: 2843 return SelectIToFP(I, /*isSigned*/ true); 2844 case Instruction::UIToFP: 2845 return SelectIToFP(I, /*isSigned*/ false); 2846 case Instruction::FPToSI: 2847 return SelectFPToI(I, /*isSigned*/ true); 2848 case Instruction::FPToUI: 2849 return SelectFPToI(I, /*isSigned*/ false); 2850 case Instruction::Add: 2851 return SelectBinaryIntOp(I, ISD::ADD); 2852 case Instruction::Or: 2853 return SelectBinaryIntOp(I, ISD::OR); 2854 case Instruction::Sub: 2855 return SelectBinaryIntOp(I, ISD::SUB); 2856 case Instruction::FAdd: 2857 return SelectBinaryFPOp(I, ISD::FADD); 2858 case Instruction::FSub: 2859 return SelectBinaryFPOp(I, ISD::FSUB); 2860 case Instruction::FMul: 2861 return SelectBinaryFPOp(I, ISD::FMUL); 2862 case Instruction::SDiv: 2863 return SelectDiv(I, /*isSigned*/ true); 2864 case Instruction::UDiv: 2865 return SelectDiv(I, /*isSigned*/ false); 2866 case Instruction::SRem: 2867 return SelectRem(I, /*isSigned*/ true); 2868 case Instruction::URem: 2869 return SelectRem(I, /*isSigned*/ false); 2870 case Instruction::Call: 2871 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2872 return SelectIntrinsicCall(*II); 2873 return SelectCall(I); 2874 case Instruction::Select: 2875 return SelectSelect(I); 2876 case Instruction::Ret: 2877 return SelectRet(I); 2878 case Instruction::Trunc: 2879 return SelectTrunc(I); 2880 case Instruction::ZExt: 2881 case Instruction::SExt: 2882 return SelectIntExt(I); 2883 case Instruction::Shl: 2884 return SelectShift(I, ARM_AM::lsl); 2885 case Instruction::LShr: 2886 return SelectShift(I, ARM_AM::lsr); 2887 case Instruction::AShr: 2888 return SelectShift(I, ARM_AM::asr); 2889 default: break; 2890 } 2891 return false; 2892} 2893 2894namespace { 2895// This table describes sign- and zero-extend instructions which can be 2896// folded into a preceding load. All of these extends have an immediate 2897// (sometimes a mask and sometimes a shift) that's applied after 2898// extension. 2899const struct FoldableLoadExtendsStruct { 2900 uint16_t Opc[2]; // ARM, Thumb. 
2901 uint8_t ExpectedImm; 2902 uint8_t isZExt : 1; 2903 uint8_t ExpectedVT : 7; 2904} FoldableLoadExtends[] = { 2905 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 }, 2906 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 }, 2907 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 }, 2908 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 }, 2909 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 } 2910}; 2911} 2912 2913/// \brief The specified machine instr operand is a vreg, and that 2914/// vreg is being provided by the specified load instruction. If possible, 2915/// try to fold the load as an operand to the instruction, returning true if 2916/// successful. 2917bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, 2918 const LoadInst *LI) { 2919 // Verify we have a legal type before going any further. 2920 MVT VT; 2921 if (!isLoadTypeLegal(LI->getType(), VT)) 2922 return false; 2923 2924 // Combine load followed by zero- or sign-extend. 2925 // ldrb r1, [r0] ldrb r1, [r0] 2926 // uxtb r2, r1 => 2927 // mov r3, r2 mov r3, r1 2928 if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm()) 2929 return false; 2930 const uint64_t Imm = MI->getOperand(2).getImm(); 2931 2932 bool Found = false; 2933 bool isZExt; 2934 for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends); 2935 i != e; ++i) { 2936 if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() && 2937 (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm && 2938 MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) { 2939 Found = true; 2940 isZExt = FoldableLoadExtends[i].isZExt; 2941 } 2942 } 2943 if (!Found) return false; 2944 2945 // See if we can handle this address. 2946 Address Addr; 2947 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; 2948 2949 unsigned ResultReg = MI->getOperand(0).getReg(); 2950 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) 2951 return false; 2952 MI->eraseFromParent(); 2953 return true; 2954} 2955 2956unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, 2957 unsigned Align, MVT VT) { 2958 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2959 ARMConstantPoolConstant *CPV = 2960 ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2961 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 2962 2963 unsigned Opc; 2964 unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT)); 2965 // Load value. 2966 if (isThumb2) { 2967 DestReg1 = constrainOperandRegClass(TII.get(ARM::t2LDRpci), DestReg1, 0); 2968 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2969 TII.get(ARM::t2LDRpci), DestReg1) 2970 .addConstantPoolIndex(Idx)); 2971 Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs; 2972 } else { 2973 // The extra immediate is for addrmode2. 2974 DestReg1 = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg1, 0); 2975 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2976 DbgLoc, TII.get(ARM::LDRcp), DestReg1) 2977 .addConstantPoolIndex(Idx).addImm(0)); 2978 Opc = UseGOTOFF ? 
ARM::ADDrr : ARM::LDRrs; 2979 } 2980 2981 unsigned GlobalBaseReg = AFI->getGlobalBaseReg(); 2982 if (GlobalBaseReg == 0) { 2983 GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT)); 2984 AFI->setGlobalBaseReg(GlobalBaseReg); 2985 } 2986 2987 unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT)); 2988 DestReg2 = constrainOperandRegClass(TII.get(Opc), DestReg2, 0); 2989 DestReg1 = constrainOperandRegClass(TII.get(Opc), DestReg1, 1); 2990 GlobalBaseReg = constrainOperandRegClass(TII.get(Opc), GlobalBaseReg, 2); 2991 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2992 DbgLoc, TII.get(Opc), DestReg2) 2993 .addReg(DestReg1) 2994 .addReg(GlobalBaseReg); 2995 if (!UseGOTOFF) 2996 MIB.addImm(0); 2997 AddOptionalDefs(MIB); 2998 2999 return DestReg2; 3000} 3001 3002bool ARMFastISel::FastLowerArguments() { 3003 if (!FuncInfo.CanLowerReturn) 3004 return false; 3005 3006 const Function *F = FuncInfo.Fn; 3007 if (F->isVarArg()) 3008 return false; 3009 3010 CallingConv::ID CC = F->getCallingConv(); 3011 switch (CC) { 3012 default: 3013 return false; 3014 case CallingConv::Fast: 3015 case CallingConv::C: 3016 case CallingConv::ARM_AAPCS_VFP: 3017 case CallingConv::ARM_AAPCS: 3018 case CallingConv::ARM_APCS: 3019 break; 3020 } 3021 3022 // Only handle simple cases. i.e. Up to 4 i8/i16/i32 scalar arguments 3023 // which are passed in r0 - r3. 3024 unsigned Idx = 1; 3025 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3026 I != E; ++I, ++Idx) { 3027 if (Idx > 4) 3028 return false; 3029 3030 if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) || 3031 F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || 3032 F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) 3033 return false; 3034 3035 Type *ArgTy = I->getType(); 3036 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) 3037 return false; 3038 3039 EVT ArgVT = TLI.getValueType(ArgTy); 3040 if (!ArgVT.isSimple()) return false; 3041 switch (ArgVT.getSimpleVT().SimpleTy) { 3042 case MVT::i8: 3043 case MVT::i16: 3044 case MVT::i32: 3045 break; 3046 default: 3047 return false; 3048 } 3049 } 3050 3051 3052 static const uint16_t GPRArgRegs[] = { 3053 ARM::R0, ARM::R1, ARM::R2, ARM::R3 3054 }; 3055 3056 const TargetRegisterClass *RC = &ARM::rGPRRegClass; 3057 Idx = 0; 3058 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3059 I != E; ++I, ++Idx) { 3060 unsigned SrcReg = GPRArgRegs[Idx]; 3061 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); 3062 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. 3063 // Without this, EmitLiveInCopies may eliminate the livein if its only 3064 // use is a bitcast (which isn't turned into an instruction). 3065 unsigned ResultReg = createResultReg(RC); 3066 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 3067 TII.get(TargetOpcode::COPY), 3068 ResultReg).addReg(DstReg, getKillRegState(true)); 3069 UpdateValueMap(I, ResultReg); 3070 } 3071 3072 return true; 3073} 3074 3075namespace llvm { 3076 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, 3077 const TargetLibraryInfo *libInfo) { 3078 const TargetMachine &TM = funcInfo.MF->getTarget(); 3079 3080 const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>(); 3081 // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl. 
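// For example (illustrative): an ARM-mode function built for a Linux triple
// is handled by this FastISel, while a Thumb-mode function on the same
// triple falls back to SelectionDAG.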
3082 bool UseFastISel = false;
3083 UseFastISel |= Subtarget->isTargetMachO() && !Subtarget->isThumb1Only();
3084 UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb();
3085 UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb();
3086
3087 if (UseFastISel) {
3088 // iOS always has a FP for backtracking; force other targets
3089 // to keep their FP when doing FastISel. The emitted code is
3090 // currently superior, and in cases like test-suite's lencod
3091 // FastISel isn't quite correct when FP is eliminated.
3092 TM.Options.NoFramePointerElim = true;
3093 return new ARMFastISel(funcInfo, libInfo);
3094 }
3095 return 0;
3096 }
3097}
3098