ARMFastISel.cpp revision a8c4d739f2e763ae6dea7661a3af8393eeebd8ba
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
       Base.Reg = 0;
     }
  } Address;

class ARMFastISel : public FastISel {

    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;
    const TargetMachine &TM;
    const TargetInstrInfo &TII;
    const TargetLowering &TLI;
    ARMFunctionInfo *AFI;

    // Convenience variables to avoid some queries.
    bool isThumb2;
    LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }
    // Code from FastISel.cpp.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

   #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function, or the instruction is not NEON, it was
  // handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}
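// Editor's note (illustrative, not part of the original source): a predicable
// ARM instruction carries two trailing predicate operands, the condition code
// and the flag register it reads. AddDefaultPred appends the unconditional
// form, so an add built through the helpers below ends up roughly as
//   ADDrr %dst, %a, %b, pred:14 /* ARMCC::AL */, pred-reg:%noreg
// with an extra opt-cc %noreg operand appended when AddDefaultCC runs.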
// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
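// Editor's note (illustrative): the FastEmitInst_* helpers all share one
// shape. If the MCInstrDesc has an explicit def, the fresh vreg is used as
// the instruction's result directly; if the only def is implicit
// (II.getNumDefs() == 0), the instruction is emitted as-is and the value is
// then COPY'd out of II.ImplicitDefs[0]. A typical call looks like:
//   unsigned R = FastEmitInst_rr(ARM::ADDrr, &ARM::GPRRegClass,
//                                Op0, /*Op0IsKill*/true,
//                                Op1, /*Op1IsKill*/true);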
unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}
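// Editor's sketch of what the FP materialization below produces (illustrative
// values, not from the original source): an immediate such as 1.0f that fits
// the VFP3 8-bit encoding becomes a single
//   %S = FCONSTS <encoded-imm>
// while a value like 0.1f, which is not encodable, falls back to a
// constant-pool load:
//   %S = VLDRS <cp-idx>, 0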
// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}
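// Editor's sketch for ARMMaterializeInt above (illustrative): on a v6t2
// target, materializing i32 42 emits a single movw,
//   %r = MOVi16 42        (t2MOVi16 in Thumb2)
// and materializing -2 uses the mvn form, since ~(-2) == 1 is encodable:
//   %r = MVNi 1
// Anything that fits neither pattern spills to a constant-pool load
// (LDRcp / t2LDRpci).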
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin.  Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
        .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
        .addConstantPoolIndex(Idx)
        .addImm(0);
    }
    AddOptionalDefs(MIB);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}
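// Editor's summary (derived from the routines below and the load/store
// emitters; not in the original source): this port only handles scalar
// types. i32, f32 and f64 are register-legal; i1, i8 and i16 are accepted
// for loads, stores and compares by first sign- or zero-extending them to
// i32; everything else (i64, vectors, ...) is rejected so selection falls
// back to the DAG selector.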
bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}
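// Editor's example for the GEP folding in ARMComputeAddress below
// (illustrative IR, not from the original source): for
//   %p = getelementptr { i32, i32 }* %s, i32 0, i32 1
//   %v = load i32* %p
// the struct-layout offset (4 bytes here) is folded into Addr.Offset, so the
// load is selected with base %s and immediate offset 4 instead of a separate
// address computation.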
// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}
void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4; it is multiplied back later. Do the division here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}
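// Editor's note on the addrmode3 encoding used above (illustrative): the
// offset is carried as a sign-and-magnitude immediate, so a negative offset
// such as -12 is passed as (0x100 | 12), i.e. bit 8 selects subtraction
// rather than using a two's-complement value.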
bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = &ARM::GPRRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert (ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}
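// Editor's example for the load path above (illustrative): "%v = load i16*
// %p, align 2" selects LDRH (addrmode3) in ARM mode or t2LDRHi12 in Thumb2,
// with isZExt defaulting to true; an under-aligned f32 load instead goes
// through a plain integer LDR followed by a VMOVSR into an S-register, as
// handled by the needVMOV case.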
bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ?
        (const TargetRegisterClass*)&ARM::tGPRRegClass :
        (const TargetRegisterClass*)&ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}
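// Editor's note on getComparePred above (illustrative): unsigned integer
// predicates map to the ARM unsigned condition codes, e.g. icmp ult -> LO
// and icmp uge -> HS, while the float cases FCMP_ONE and FCMP_UEQ would each
// need two compares and are rejected by returning AL, which callers treat
// as "unhandled".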
bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));
  return true;
}
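// Editor's sketch of the generic i1 branch fallback above (illustrative):
// for "br i1 %c, label %t, label %f" where %c is not a same-block compare,
// the emitted sequence is roughly
//   TSTri %c, 1              ; set CPSR from bit 0
//   Bcc %t, pred:NE, CPSR
//   B %f                     ; via FastEmitBranch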
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}
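// Editor's example for ARMEmitCmp above (illustrative): "icmp eq i32 %a, 10"
// emits CMPri %a, 10 (t2CMPri in Thumb2); "icmp eq i32 %a, -10" negates the
// immediate and emits CMNri %a, 10 instead, since cmn computes %a + 10 and
// sets the same flags as comparing against -10.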
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
          .addReg(ZeroReg).addImm(1)
          .addImm(ARMPred).addReg(ARM::CPSR);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}
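// Editor's note on the int-to-fp conversion below (illustrative): VFP
// converts in place inside the FP register file, so the i32 value is first
// transferred with VMOVSR into an S-register (ARMMoveToFPReg) and then
// converted there, e.g. "sitofp i32 %x to float" becomes roughly
//   %s = VMOVSR %x
//   %f = VSITOS %s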
bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}
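// Editor's sketch for the select lowering below (illustrative): for
// "%r = select i1 %c, i32 %a, i32 %b" the emitted sequence is roughly
//   CMPri %c, 0
//   %r = MOVCCr %b, %a, pred:NE, CPSR   ; yields %a when %c != 0, else %b
// and when the false value is a small encodable constant the MOVCCi / MVNCCi
// immediate forms are used instead.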
ARM::t2MVNCCi : ARM::MVNCCi; 1670 } 1671 } 1672 unsigned ResultReg = createResultReg(RC); 1673 if (!UseImm) 1674 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1675 .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR); 1676 else 1677 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1678 .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR); 1679 UpdateValueMap(I, ResultReg); 1680 return true; 1681} 1682 1683bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) { 1684 MVT VT; 1685 Type *Ty = I->getType(); 1686 if (!isTypeLegal(Ty, VT)) 1687 return false; 1688 1689 // If we have integer div support we should have selected this automagically. 1690 // In case we have a real miss go ahead and return false and we'll pick 1691 // it up later. 1692 if (Subtarget->hasDivide()) return false; 1693 1694 // Otherwise emit a libcall. 1695 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1696 if (VT == MVT::i8) 1697 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8; 1698 else if (VT == MVT::i16) 1699 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16; 1700 else if (VT == MVT::i32) 1701 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32; 1702 else if (VT == MVT::i64) 1703 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64; 1704 else if (VT == MVT::i128) 1705 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128; 1706 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!"); 1707 1708 return ARMEmitLibcall(I, LC); 1709} 1710 1711bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) { 1712 MVT VT; 1713 Type *Ty = I->getType(); 1714 if (!isTypeLegal(Ty, VT)) 1715 return false; 1716 1717 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1718 if (VT == MVT::i8) 1719 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8; 1720 else if (VT == MVT::i16) 1721 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16; 1722 else if (VT == MVT::i32) 1723 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32; 1724 else if (VT == MVT::i64) 1725 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64; 1726 else if (VT == MVT::i128) 1727 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128; 1728 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!"); 1729 1730 return ARMEmitLibcall(I, LC); 1731} 1732 1733bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { 1734 EVT DestVT = TLI.getValueType(I->getType(), true); 1735 1736 // We can get here in the case when we have a binary operation on a non-legal 1737 // type and the target independent selector doesn't know how to handle it. 1738 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 1739 return false; 1740 1741 unsigned Opc; 1742 switch (ISDOpcode) { 1743 default: return false; 1744 case ISD::ADD: 1745 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr; 1746 break; 1747 case ISD::OR: 1748 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr; 1749 break; 1750 case ISD::SUB: 1751 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr; 1752 break; 1753 } 1754 1755 unsigned SrcReg1 = getRegForValue(I->getOperand(0)); 1756 if (SrcReg1 == 0) return false; 1757 1758 // TODO: Often the 2nd operand is an immediate, which can be encoded directly 1759 // in the instruction, rather then materializing the value in a register. 
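  // A rough sketch of what that could look like (hypothetical, not wired in;
  // the encoding checks would mirror the ones used in SelectSelect above):
  //   if (const ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
  //     if ((isThumb2 ? ARM_AM::getT2SOImmVal(C->getZExtValue())
  //                   : ARM_AM::getSOImmVal(C->getZExtValue())) != -1)
  //       ... pick t2ADDri/ADDri etc. and add the immediate operand ...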
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
    // Fall through: variadic functions don't use the hard-float ABI.
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Make sure this is a location we know how to copy/store to later on.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we are able to handle the call's arguments in fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through.  Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
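    // For example, an f64 argument assigned to a pair of core registers is
    // split with VMOVRRD below, which ends up as roughly (register names
    // purely illustrative):
    //   vmov r0, r1, d0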
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr);
      (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, 0, Name);
  return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
}

// A quick function that will emit a call for a named libcall, passing the
// operands of the Instruction in I as the call's arguments. We can assume
// that we can emit a call for any libcall we can produce. This is an abridged
// version of the full call infrastructure since we won't need to worry about
// things like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
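// For example, an i32 sdiv on a subtarget without hardware divide reaches
// this via SelectDiv and expands to something like the following sketch (the
// actual callee comes from TLI.getLibcallName, e.g. __aeabi_idiv rather than
// __divsi3 on AEABI targets):
//   mov r0, <lhs>
//   mov r1, <rhs>
//   bl  __divsi3
//   mov <result>, r0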
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (EnableARMLongCalls) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  if (isThumb2) {
    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
    if (EnableARMLongCalls)
      MIB.addReg(CalleeReg);
    else
      MIB.addExternalSymbol(TLI.getLibcallName(Call));
  } else {
    if (EnableARMLongCalls)
      MIB.addReg(CalleeReg);
    else
      MIB.addExternalSymbol(TLI.getLibcallName(Call));

    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
  }
  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e - i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || EnableARMLongCalls) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  if (isThumb2) {
    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
    if (UseReg)
      MIB.addReg(CalleeReg);
    else if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (UseReg)
      MIB.addReg(CalleeReg);
    else if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);

    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
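  // Currently handled below: frameaddress, memcpy, memmove, memset and trap;
  // anything else returns false so FastISel falls back to the
  // SelectionDAG-based path.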
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    unsigned LdrOpc;
    const TargetRegisterClass *RC;
    if (isThumb2) {
      LdrOpc = ARM::t2LDRi12;
      RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
    } else {
      LdrOpc = ARM::LDRi12;
      RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
    }

    const ARMBaseRegisterInfo *RegInfo =
        static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load frame address:
    //   ldr r0, [fp]
    //   ldr r0, [r0]
    //   ldr r0, [r0]
    //   ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(LdrOpc), DestReg)
                      .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    UpdateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ARMComputeAddress.
    // Otherwise, we would emit dead code because we don't currently handle
    // memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
    return true;
  }
  }
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
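  // For example, 'trunc i32 %x to i8' emits no instructions at all: the
  // source register is simply reused for the result below.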
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
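// Dispatch on the IR opcode. Returning false from any of the Select*
// routines (or from the default case) makes FastISel fall back to the
// SelectionDAG-based path for that instruction.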
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  //   ldrb r1, [r0]       ldrb r1, [r0]
  //   uxtb r2, r1    =>
  //   mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Fall through.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Fall through.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS only for now; Thumb1 is explicitly unsupported.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
      return new ARMFastISel(funcInfo);
    return 0;
  }
}